kernel
Loading...
Searching...
No Matches
fault.c
Go to the documentation of this file.
1/*++
2
3Module Name:
4
5 fault.c
6
7Purpose:
8
9 This translation unit contains the implementation of access faults in the system. (page faults)
10
11Author:
12
13 slep (Matanel) 2025.
14
15Revision History:
16
17--*/
18
19#include "../../includes/mm.h"
20#include "../../includes/mh.h"
21#include "../../includes/me.h"
22#include "../../includes/ps.h"
23#include "../../includes/mg.h"
24#include "../../assert.h"
25#include "../../includes/fs.h"
26
29 IN uint64_t FaultBits,
30 IN uint64_t VirtualAddress,
31 IN PRIVILEGE_MODE PreviousMode,
32 IN PTRAP_FRAME TrapFrame
33)
34
35/*++
36
37 Routine description:
38
39 This function is called by the kernel on data or instruction access faults.
40 - The access fault was detected due to:
41 An Access Violation.
42 A PTE with the present bit clear.
43 A Valid PTE with the Dirty bit and a write operation.
44
45 Note that the page fault could occur because of the Page Directory contents as well.
46
47 This routine determines what type of fault it is and calls the appropriate routine to handle or write the page fault.
48
49 Arguments:
50
51 [IN] FaultBits - The error code pushed by the CPU.
52 [IN] VirtualAddress - The Memory Address Referenced (CR2)
53 [IN] PreviousMode - Supplies the mode (kernel or user) where the fault occurred.
54 [IN] TrapFrame - Trap information at fault.
55
56 Return Values:
57
58 MTSTATUS Code resulting in the status of fault handling operation.
59
60 Could be:
61 MT_SUCCESS -- Fault handled, return.
62 MT_ACCESS_VIOLATION -- User mode only (or kernel mode probing).
63 MT_GUARD_PAGE_VIOLATION -- Bugchecks.
64
65 The function would bugcheck if an invalid kernel mode access occurred (or in the worst case, if zero memory is available to fill the VAD of the user mode process, but I want to change it to sleep instead..)
66
67--*/
68
69{
70 // Declarations
71#ifdef DEBUG
72 // These are used when I'm debugging.
73 PMMPTE ReferencedPml4e = MiGetPml4ePointer(VirtualAddress);
74 PMMPTE ReferencedPdpte = MiGetPdptePointer(VirtualAddress);
75 PMMPTE ReferencedPde = MiGetPdePointer(VirtualAddress);
76 UNREFERENCED_PARAMETER(ReferencedPml4e); UNREFERENCED_PARAMETER(ReferencedPdpte); UNREFERENCED_PARAMETER(ReferencedPde);
77#endif
78 PMMPTE ReferencedPte = MiGetPtePointer(VirtualAddress);
79 FAULT_OPERATION OperationDone = MiRetrieveOperationFromErrorCode(FaultBits);
80 IRQL PreviousIrql = MeGetCurrentIrql();
81
82#ifdef DEBUG
83 gop_printf(COLOR_RED, "Inside MmAccessFault | FaultBits: %llx | VirtualAddress: %p | PreviousMode: %d | TrapFrame->rip: %p | Operation: %d | Irql: %d\n", (unsigned long long)FaultBits, (void*)(uintptr_t)VirtualAddress, PreviousMode, (void*)(uintptr_t)TrapFrame->rip, OperationDone, PreviousIrql);
84#endif
85
86 if (!ReferencedPte) {
87 // If we cannot get the PTE for the VA, we raise an access violation if it's user mode, or bugcheck on kernel mode.
88 if (PreviousMode == UserMode) {
90 }
91
92 // Bugcheck here, operations in the Bugcheck label use the ReferencedPte pointer.
95 (void*)VirtualAddress,
96 (void*)MiRetrieveOperationFromErrorCode(TrapFrame->error_code),
97 (void*)TrapFrame->rip,
98 (void*)FaultBits
99 );
100 }
101
102 // If the VA given isn't canonical (sign extended after bit 47, required by CPU MMU Laws), we return or bugcheck depending on the previous mode.
103 if (!MI_IS_CANONICAL_ADDR(VirtualAddress)) {
104
105 if (PreviousMode == UserMode) {
106 // User mode fault on non canonical address, not destructive.
107 return MT_ACCESS_VIOLATION;
108 }
109
110 // Kernel mode page fault on a non canonical address.
111 goto BugCheck;
112
113 }
114
115 // Check for NX. (NX on anywhere is invalid, no matter the range)
116 if (OperationDone == ExecuteOperation) {
117 // Check if the page has NoExecute.
118 if (ReferencedPte->Hard.NoExecute) {
119 // Execution is disallowed.
120 if (PreviousMode == UserMode) {
121 // UserMode executions get an access violation.
122 return MT_ACCESS_VIOLATION;
123 }
124 else {
125 // KernelMode violations are bugchecks.
126 goto BugCheck;
127 }
128 }
129
130 // The page is executable allowed, we check if we demand allocate (or if it is a VAD for user mode)
131 // Previously it returned an access violation for every time we executed wrong in user mode, which was bad.
132 }
133
134 // Now we check for each address in the system, and handle the request based on that.
135 if (VirtualAddress >= MmSystemRangeStart) {
136 if (PreviousMode == UserMode) {
137 // User mode access in kernel memory, invalid.
138 return MT_ACCESS_VIOLATION;
139 }
140
141 MMPTE TempPte = *ReferencedPte;
142
143 // If this is a guard page, we MUST NOT demand allocate it. (pre guard)
144 if (TempPte.Hard.Present == 0 && TempPte.Soft.SoftwareFlags & MI_GUARD_PAGE_PROTECTION) {
145 // Guard pages for kernel mode do not raise an exception and fill in the PTE, this is only for user mode.
146 goto BugCheck;
147 }
148
149 // PTE Is present, but we got a fault.
150 if (TempPte.Hard.Present) {
151 // Write fault to read-only memory.
152 if ((OperationDone == WriteOperation) && (TempPte.Hard.Write == 0)) {
155 (void*)VirtualAddress,
156 (void*)ReferencedPte,
157 NULL,
158 NULL
159 );
160 }
161
162 // If we get here, it was an access/dirty update — set dirty bit if write
163 if (OperationDone == WriteOperation) {
164 // set dirty in PTE and, if needed, PFN->Dirty
165 // Prefer an atomic update: build NewPte = TempPte; NewPte.Hard.Dirty = 1; WriteValidPteAtomic(...)
166 MMPTE NewPte = TempPte;
167 NewPte.Hard.Dirty = 1;
168 MiAtomicExchangePte(ReferencedPte, NewPte.Value);
169 MiInvalidateTlbForVa((void*)VirtualAddress);
170 }
171 return MT_SUCCESS;
172 }
173
174 // Before any demand allocation, check IRQL.
175 if (PreviousIrql >= DISPATCH_LEVEL) {
176 // IRQL Isn't less than DISPATCH_LEVEL, so we cannot lazily allocate, since it would **block**.
179 (void*)VirtualAddress,
180 (void*)PreviousIrql,
181 (void*)OperationDone,
182 (void*)TrapFrame->rip
183 );
184 }
185
186
187 // PTE Isn't present, check for demand allocations.
188 if (MM_IS_DEMAND_ZERO_PTE(TempPte)) {
189 // Allocate a physical page for kernel demand-zero
191 if (pfn == PFN_ERROR) {
192 // out of memory.
193 goto BugCheck;
194 }
195
196 // The page first of all must be a protection with readable.
197 assert((TempPte.Soft.SoftwareFlags & PROT_KERNEL_READ) == 1, "Read protection flag isnt set on a DEMAND_ZERO pte.");
198
199 if ((TempPte.Soft.SoftwareFlags & PROT_KERNEL_READ) == 0) {
200 // Invalid DemandZero.
201 goto BugCheck;
202 }
203
204 // Check protection mask.
205 uint64_t ProtectionFlags = PAGE_PRESENT;
206 ProtectionFlags |= (TempPte.Soft.SoftwareFlags & PROT_KERNEL_WRITE) ? PAGE_RW : 0;
207 ProtectionFlags |= (TempPte.Soft.SoftwareFlags & PROT_KERNEL_NOEXECUTE) ? PAGE_NX : 0;
208
209 // Write the PTE.
210 MI_WRITE_PTE(ReferencedPte, VirtualAddress, PPFN_TO_PHYSICAL_ADDRESS(INDEX_TO_PPFN(pfn)), ProtectionFlags);
211
212 return MT_SUCCESS;
213 }
214
215 // PTE Isn't present, and its a transition (KERNEL MODE PATH)
216 if (TempPte.Soft.Transition == 1) {
217 // Retrieve the PFN Number written in the transition page.
218 PAGE_INDEX pfn = TempPte.Soft.PageFrameNumber;
219 if (!MiIsValidPfn(pfn)) goto BugCheck;
220
221 // Acquire Standby PFN DB List lock. (acquiring spinlock is okay, IRQL detection was checked above)
222 IRQL oldIrql;
223 MsAcquireSpinlock(&PfnDatabase.StandbyPageList.PfnListLock, &oldIrql);
224
225 // Check the PFN, it has to be in the StandBy list and be equal to our PTE, if not, bugcheck.
226 PPFN_ENTRY PPfn = INDEX_TO_PPFN(pfn);
227 if (PPfn->State != PfnStateStandby || PPfn->Descriptor.Mapping.PteAddress == NULL || PPfn->Descriptor.Mapping.PteAddress != ReferencedPte) {
228 // Release spinlock.
229 MsReleaseSpinlock(&PfnDatabase.StandbyPageList.PfnListLock, oldIrql);
230 goto BugCheck;
231 }
232 // PFN Is matching to this pte, now we can set the PTE.
233 // Check protection mask.
234 uint64_t ProtectionFlags = PAGE_PRESENT;
235 ProtectionFlags |= (TempPte.Soft.SoftwareFlags & PROT_KERNEL_WRITE) ? PAGE_RW : 0;
236 ProtectionFlags |= (TempPte.Soft.SoftwareFlags & PROT_KERNEL_NOEXECUTE) ? PAGE_NX : 0;
237
238 // Release this PFN from the list.
240
241 // Atomically set PTE.
242 MI_WRITE_PTE(ReferencedPte, VirtualAddress, PFN_TO_PHYS(pfn), ProtectionFlags);
243
244 // Release PFN Standby list lock.
245 MsReleaseSpinlock(&PfnDatabase.StandbyPageList.PfnListLock, oldIrql);
246
247 // Return success.
248 return MT_SUCCESS;
249 }
250
251 // TODO GRAB FROM PAGEFILE
252
253 // Unknown PTE format -> bugcheck (kernel space)
254 goto BugCheck;
255 }
256
257 // Address is below the kernel start, and above user address.
258 // This if statement should never pass, since these addresses are non canonical, and the first if statement checks for a non canonical address.
259 // basically kernel bloat at this point.
260 // i removed it, bye bye.
261
262 // Address is in user range.
263 // Both kernel and user mode are allowed to fault in here, guaranteeing there is a VAD backing it of course (and IRQL demands)
264 // If kernel faulted and no vad (Irql is good), then we search for exception handlers in return, if none we bugcheck.
265 // If a user faulted and no vad (Irql is good), then we search for exception handlers in return, if none we terminate the thread.
266 if (VirtualAddress <= MmHighestUserAddress) {
267 // Before any demand allocation, check IRQL.
268 if (PreviousIrql >= DISPATCH_LEVEL) {
269 // IRQL Isn't less than DISPATCH_LEVEL, so we cannot lazily allocate, since it would **block**.
272 (void*)VirtualAddress,
273 (void*)PreviousIrql,
274 (void*)OperationDone,
275 (void*)TrapFrame->rip
276 );
277 }
278
279 // Fault on a user address, we check if there is a vad for it, if so, allocate the page.
280 PMMVAD vad = MiFindVad(PsGetCurrentProcess(), VirtualAddress);
281 if (!vad) return MT_ACCESS_VIOLATION; // If kernel mode exception dispatcher should catch.
282
283 // Check if we are allowed to allocate.
284 if (vad->Flags & VAD_FLAG_RESERVED) {
285 // Check if this is a guard page.
286 if (vad->Flags & VAD_FLAG_GUARD_PAGE) {
287 // Raise a guard page violation status and allocate the page.
288 //ExpRaiseStatus() TODO
290 }
291
292 else {
293 // Allocation is forbidden, return access violation.
294 return MT_ACCESS_VIOLATION;
295 }
296 }
297
298 MMPTE TempPte = *ReferencedPte;
299
300 // Now check for transition PTE (after checking reserved vad flag)
301 // PTE Isn't present, and its a transition (USER MODE PATH) (ACCESS VIOLATION RETURN)
302 // If the previous mode is kernel mode and an access violation is returned, KMODE_EXCEPTION_NOT_HANDLED bugcheck comes
303 // unless the kernel has a try except handler set in the VA (checked in return path)
304 if (TempPte.Soft.Transition == 1) {
305 // Retrieve the PFN Number written in the transition page.
306 PAGE_INDEX pfn = TempPte.Soft.PageFrameNumber;
307 if (!MiIsValidPfn(pfn)) return MT_ACCESS_VIOLATION;
308
309 // Acquire Standby PFN DB List lock. (acquiring spinlock is okay, IRQL detection was checked above)
310 IRQL oldIrql;
311 MsAcquireSpinlock(&PfnDatabase.StandbyPageList.PfnListLock, &oldIrql);
312
313 // Check the PFN, it has to be in the StandBy list and be equal to our PTE, if not, bugcheck.
314 PPFN_ENTRY PPfn = INDEX_TO_PPFN(pfn);
315 if (PPfn->State != PfnStateStandby || PPfn->Descriptor.Mapping.PteAddress == NULL || PPfn->Descriptor.Mapping.PteAddress != ReferencedPte) {
316 // Release spinlock.
317 MsReleaseSpinlock(&PfnDatabase.StandbyPageList.PfnListLock, oldIrql);
318 return MT_ACCESS_VIOLATION;
319 }
320
321 // PFN Is matching to this pte, now we can set the PTE.
322 // Check protection mask.
323 uint64_t ProtectionFlags = PAGE_PRESENT;
324
325 // Writable
326 ProtectionFlags |= (TempPte.Soft.SoftwareFlags & PROT_KERNEL_WRITE) ? PAGE_RW : 0;
327
328 // User accessible.
329 assert((TempPte.Soft.SoftwareFlags & PROT_KERNEL_USER) != 0);
330 ProtectionFlags |= (TempPte.Soft.SoftwareFlags & PROT_KERNEL_USER) ? PAGE_USER : 0; // This should always be valid for user mode paths.
331
332 // NoExecute.
333 ProtectionFlags |= (TempPte.Soft.SoftwareFlags & PROT_KERNEL_NOEXECUTE) ? PAGE_NX : 0;
334
335 // Release this PFN from the list.
337
338 // Atomically set PTE.
339 MI_WRITE_PTE(ReferencedPte, VirtualAddress, PFN_TO_PHYS(pfn), ProtectionFlags);
340
341 // Release PFN Standby list lock.
342 MsReleaseSpinlock(&PfnDatabase.StandbyPageList.PfnListLock, oldIrql);
343
344 // Return success.
345 return MT_SUCCESS;
346 }
347
348 // Set to base values.
349 uint64_t PteFlags = PAGE_PRESENT | PAGE_NX | PAGE_USER;
350
351 // Apply flags.
352 if (vad->Flags & VAD_FLAG_WRITE) {
353 PteFlags |= PAGE_RW;
354 }
355
356 if (vad->Flags & VAD_FLAG_EXECUTE) {
357 PteFlags &= ~PAGE_NX;
358 }
359
360
361 // TODO COPY ON WRITE!!
362 /*
363 if (vad->Flags & VAD_FLAG_COPY_ON_WRITE) {
364 // Copy the physical address to the COW page.
365 }
366 */
367
368 // Looks like we have a valid vad, lets allocate.
370
371 if (pfn == PFN_ERROR) return MT_ACCESS_VIOLATION;
372
373 // Acquire the PTE for the faulty VA.
374 PMMPTE pte = MiGetPtePointer(VirtualAddress);
375
376 // Now we check if the VAD has any file attached to it, if it does, we copy the contents of the file to the RAM
377 // This could be a process file (executable, dll), or even our pagefile.
378 if (vad->File) {
379 // Calculate file offset to load into VAD.
380 uint64_t AlignedAddress = (uint64_t)PAGE_ALIGN(VirtualAddress);
381 uint64_t PageOffsetWithinVad = AlignedAddress - (uint64_t)vad->StartVa;
382 uint64_t ActualFileOffset = vad->FileOffset + PageOffsetWithinVad;
383
384 // Determine how many bytes to read from file to the page.
385 PFILE_OBJECT FileObject = vad->File;
386 uint64_t FileLength = FileObject->FileSize;
387 size_t ToRead = 0;
388
389 if (ActualFileOffset < FileLength) {
390 ToRead = (size_t)MIN((uint64_t)VirtualPageSize, FileLength - ActualFileOffset);
391 }
392 else {
393 ToRead = 0;
394 }
395
396 // Allocate enough buffer size to hold the file.
397 void* Tmp = MmAllocatePoolWithTag(NonPagedPool, VirtualPageSize, 'Fpmt'); // tmpF - Temporary Fault
398 if (!Tmp) return MT_ACCESS_VIOLATION;
399
400 // Read the file now.
401 if (ToRead > 0) {
402 MTSTATUS Status = FsReadFile(FileObject, ActualFileOffset, Tmp, ToRead, NULL);
403 if (MT_FAILURE(Status)) {
404 MmFreePool(Tmp);
405 return MT_ACCESS_VIOLATION;
406 }
407 }
408
409 // NOTE: Is this really needed? Pool allocations are zeroed, and we used a PfnStateZeroed phys page up top.
410 if (ToRead < VirtualPageSize) {
411 // zero the rest of the page
412 kmemset((uint8_t*)Tmp + ToRead, 0, VirtualPageSize - ToRead);
413 }
414
415 // Copy data from the file to the new user Page
416 // We must not access the virtual address, as if this is a page without write access, we would fault (like the .text section)
417 // Speaking from experience btw.
418 // So we operate on the physical address.
419 // This should be IRQL fine, since we filtered dispatch and above, above.
420 // As well as performed the read operation in PASSIVE_LEVEL (or APC)
421 IRQL oldIrql;
422 void* AddressToOperate = MiMapPageInHyperspace(pfn, &oldIrql);
423 kmemcpy(AddressToOperate, Tmp, VirtualPageSize);
424 MiUnmapHyperSpaceMap(oldIrql);
425 }
426
427 // Write the PTE.
428 MI_WRITE_PTE(pte, VirtualAddress, PFN_TO_PHYS(pfn), PteFlags);
429
430 // Return success.
431 return MT_SUCCESS;
432 }
433
434 // Address, is, what... impossible!
435 // This comment means execution is impossible to reach here, as we sanitized all (valid) addresses in the 48bit paging hierarchy.
436 // If it does reach here, look below.
437
438BugCheck:
439 // Bugchecks for: IRQL_NOT_LESS_OR_EQUAL or ATTEMPTED_WRITE_TO_READONLY_MEMORY are handled above.
440
441 // Check if its a NoExecute page violation
442 if (ReferencedPte->Hard.Present && ReferencedPte->Hard.NoExecute && OperationDone == ExecuteOperation) {
445 (void*)VirtualAddress,
446 (void*)MiRetrieveOperationFromErrorCode(TrapFrame->error_code),
447 (void*)TrapFrame->rip,
448 (void*)FaultBits
449 );
450 }
451
452 // Check if its a guard page violation
453 if (ReferencedPte->Soft.SoftwareFlags & MI_GUARD_PAGE_PROTECTION) {
456 (void*)VirtualAddress,
457 (void*)MiRetrieveOperationFromErrorCode(TrapFrame->error_code),
458 (void*)TrapFrame->rip,
459 (void*)FaultBits
460 );
461 }
462
463 // Check if its a pool dereference (NonPagedPool first)
464 if (VirtualAddress >= MmNonPagedPoolStart && VirtualAddress <= MmNonPagedPoolEnd) {
467 (void*)VirtualAddress,
468 (void*)MiRetrieveOperationFromErrorCode(TrapFrame->error_code),
469 (void*)TrapFrame->rip,
470 (void*)FaultBits
471 );
472 }
473
474 // Check if Paged Pool Dereference. (the IRQL_NOT_LESS_OR_EQUAL bugcheck is up top)
475 if (VirtualAddress >= MmPagedPoolStart && VirtualAddress <= MmPagedPoolEnd) {
478 (void*)VirtualAddress,
479 (void*)MiRetrieveOperationFromErrorCode(TrapFrame->error_code),
480 (void*)TrapFrame->rip,
481 (void*)FaultBits
482 );
483 }
484
485 // Normal page fault.
488 (void*)VirtualAddress,
489 (void*)MiRetrieveOperationFromErrorCode(TrapFrame->error_code),
490 (void*)TrapFrame->rip,
491 (void*)FaultBits
492 );
493}
494
495bool
497 void
498)
499
500/*++
501
502 Routine description:
503 (UNUSED, ALWAYS FALSE)
504 This function determines if invalid access (e.g, a null pointer dereference), is allowed within the current context.
505
506 Arguments:
507
508 None.
509
510 Return Values:
511
512 True if invalid access is allowed, false otherwise.
513
514 Notes:
515
516 This routine is unused, but will be kept for future modifications if any.
517
518--*/
519
520
521{
522 return false;
523}
#define IN
Definition annotations.h:8
#define assert(...)
Definition assert.h:57
NORETURN void MeBugCheckEx(IN enum _BUGCHECK_CODES BugCheckCode, IN void *BugCheckParameter1, IN void *BugCheckParameter2, IN void *BugCheckParameter3, IN void *BugCheckParameter4)
Definition bugcheck.c:245
@ DISPATCH_LEVEL
Definition core.h:17
enum _IRQL IRQL
TRAP_FRAME * PTRAP_FRAME
Definition core.h:56
MTSTATUS MmAccessFault(IN uint64_t FaultBits, IN uint64_t VirtualAddress, IN PRIVILEGE_MODE PreviousMode, IN PTRAP_FRAME TrapFrame)
Definition fault.c:28
bool MmInvalidAccessAllowed(void)
Definition fault.c:496
struct _FILE_OBJECT * PFILE_OBJECT
void gop_printf(uint32_t color, const char *fmt,...)
Definition gop.c:633
void * MiMapPageInHyperspace(IN uint64_t PfnIndex, OUT PIRQL OldIrql)
Definition hypermap.c:37
void MiUnmapHyperSpaceMap(IN IRQL OldIrql)
Definition hypermap.c:95
#define UNREFERENCED_PARAMETER(x)
Definition intrin.h:29
#define MIN(a, b)
Definition macros.h:40
PMMPTE MiGetPdptePointer(IN uintptr_t va)
Definition map.c:177
PMMPTE MiGetPdePointer(IN uintptr_t va)
Definition map.c:219
PMMPTE MiGetPml4ePointer(IN uintptr_t va)
Definition map.c:147
PMMPTE MiGetPtePointer(IN uintptr_t va)
Definition map.c:76
void MiInvalidateTlbForVa(IN void *VirtualAddress)
Definition map.c:273
@ ATTEMPTED_WRITE_TO_READONLY_MEMORY
Definition me.h:123
@ PAGE_FAULT
Definition me.h:83
@ PAGE_FAULT_IN_FREED_NONPAGED_POOL
Definition me.h:125
@ GUARD_PAGE_DEREFERENCE
Definition me.h:116
@ PAGE_FAULT_IN_FREED_PAGED_POOL
Definition me.h:126
@ ATTEMPTED_EXECUTE_OF_NOEXECUTE_MEMORY
Definition me.h:137
@ IRQL_NOT_LESS_OR_EQUAL
Definition me.h:97
FORCEINLINE IRQL MeGetCurrentIrql(void)
Definition me.h:415
#define COLOR_RED
Colors definitions for easier access.
Definition mg.h:30
@ PAGE_RW
Definition mm.h:311
@ PAGE_USER
Definition mm.h:315
@ PAGE_PRESENT
Definition mm.h:307
@ PAGE_NX
Definition mm.h:347
#define MI_IS_CANONICAL_ADDR(va)
Definition mm.h:219
#define PFN_TO_PHYS(Pfn)
Definition mm.h:226
@ PfnStateZeroed
Definition mm.h:280
@ PfnStateStandby
Definition mm.h:277
FORCEINLINE void MiAtomicExchangePte(PMMPTE PtePtr, uint64_t NewPteValue)
Definition mm.h:762
@ NonPagedPool
Definition mm.h:355
FORCEINLINE bool MiIsValidPfn(IN PAGE_INDEX Pfn)
Definition mm.h:773
@ VAD_FLAG_READ
Definition mm.h:296
@ VAD_FLAG_RESERVED
Definition mm.h:302
@ VAD_FLAG_GUARD_PAGE
Definition mm.h:303
@ VAD_FLAG_EXECUTE
Definition mm.h:298
@ VAD_FLAG_WRITE
Definition mm.h:297
#define PROT_KERNEL_WRITE
Definition mm.h:233
enum _FAULT_OPERATION FAULT_OPERATION
struct _MMPTE * PMMPTE
FORCEINLINE FAULT_OPERATION MiRetrieveOperationFromErrorCode(uint64_t ErrorCode)
Definition mm.h:729
FORCEINLINE void * kmemcpy(void *dest, const void *src, size_t len)
Definition mm.h:669
enum _PRIVILEGE_MODE PRIVILEGE_MODE
#define PPFN_TO_PHYSICAL_ADDRESS(PPFN)
Definition mm.h:136
#define PROT_KERNEL_NOEXECUTE
Definition mm.h:234
uint64_t PAGE_INDEX
Definition mm.h:256
FORCEINLINE void * kmemset(void *dest, int64_t val, uint64_t len)
Definition mm.h:655
#define PROT_KERNEL_USER
Definition mm.h:235
@ ExecuteOperation
Definition mm.h:367
@ WriteOperation
Definition mm.h:366
#define PAGE_ALIGN(Va)
Definition mm.h:172
#define PFN_ERROR
Definition mm.h:229
struct _MMVAD * PMMVAD
#define VirtualPageSize
Definition mm.h:53
#define INDEX_TO_PPFN(Index)
Definition mm.h:62
#define MI_WRITE_PTE(_PtePointer, _Va, _Pa, _Flags)
Definition mm.h:90
struct _PFN_ENTRY * PPFN_ENTRY
struct _MMPTE MMPTE
#define MM_IS_DEMAND_ZERO_PTE(pte)
Definition mm.h:139
#define MI_GUARD_PAGE_PROTECTION
Definition mm.h:244
#define PROT_KERNEL_READ
Definition mm.h:232
@ UserMode
Definition mm.h:372
#define MT_SUCCESS
Definition mtstatus.h:22
#define MT_FAILURE(Status)
Definition mtstatus.h:16
int32_t MTSTATUS
Definition mtstatus.h:12
#define MT_ACCESS_VIOLATION
Definition mtstatus.h:131
PAGE_INDEX MiRequestPhysicalPage(IN PFN_STATE ListType)
Definition pfn.c:333
void MiUnlinkPageFromList(PPFN_ENTRY pfn)
Definition pfn.c:530
MM_PFN_DATABASE PfnDatabase
Definition pfn.c:29
uintptr_t MmNonPagedPoolEnd
Definition pool.c:31
uintptr_t MmNonPagedPoolStart
Definition pool.c:30
uintptr_t MmPagedPoolStart
Definition pool.c:32
void MmFreePool(IN void *buf)
Definition pool.c:632
uintptr_t MmPagedPoolEnd
Definition pool.c:33
void * MmAllocatePoolWithTag(IN enum _POOL_TYPE PoolType, IN size_t NumberOfBytes, IN uint32_t Tag)
Definition pool.c:443
uintptr_t MmSystemRangeStart
Definition process.c:31
uintptr_t MmHighestUserAddress
Definition process.c:32
FORCEINLINE PEPROCESS PsGetCurrentProcess(void)
Definition ps.h:300
void MsAcquireSpinlock(IN PSPINLOCK lock, IN PIRQL OldIrql)
Definition spinlock.c:13
void MsReleaseSpinlock(IN PSPINLOCK lock, IN IRQL OldIrql)
Definition spinlock.c:45
uint64_t FileSize
Definition fs.h:107
struct _MMPTE::@172372265215056352375070220246156106027174106113::@200357034104227323320222006243127050212100105247 Hard
uint64_t Dirty
Definition mm.h:436
uint64_t Write
Definition mm.h:431
uint64_t PageFrameNumber
Definition mm.h:442
uint64_t Present
Definition mm.h:430
uint64_t Transition
Definition mm.h:455
uint64_t Value
Definition mm.h:423
uint64_t NoExecute
Definition mm.h:444
uint64_t SoftwareFlags
Definition mm.h:460
struct _MMPTE::@172372265215056352375070220246156106027174106113::@277354034164206104264133322054061025100052052376 Soft
VAD_FLAGS Flags
Definition mm.h:517
uintptr_t StartVa
Definition mm.h:515
uint64_t FileOffset
Definition mm.h:529
struct _FILE_OBJECT * File
Definition mm.h:528
union _PFN_ENTRY::@217024126340164016372152071216274230164113211246 Descriptor
struct _PFN_ENTRY::@217024126340164016372152071216274230164113211246::@301110335271023021153236134322146064331241142124 Mapping
uint8_t State
Definition mm.h:471
PMMPTE PteAddress
Definition mm.h:482
PMMVAD MiFindVad(IN PEPROCESS Process, IN uintptr_t VirtualAddress)
Definition vad.c:352
MTSTATUS FsReadFile(IN PFILE_OBJECT FileObject, IN uint64_t FileOffset, OUT void *Buffer, IN size_t BufferSize, _Out_Opt size_t *BytesRead)
Definition vfs.c:117