My Project
Loading...
Searching...
No Matches
fault.c
Go to the documentation of this file.
1/*++
2
3Module Name:
4
5 fault.c
6
7Purpose:
8
9 This translation unit contains the implementation of access faults in the system. (page faults)
10
11Author:
12
13 slep (Matanel) 2025.
14
15Revision History:
16
17--*/
18
19#include "../../includes/mm.h"
20#include "../../includes/mh.h"
21#include "../../includes/me.h"
22#include "../../includes/ps.h"
23#include "../../includes/mg.h"
24#include "../../assert.h"
25
28 IN uint64_t FaultBits,
29 IN uint64_t VirtualAddress,
30 IN PRIVILEGE_MODE PreviousMode,
31 IN PTRAP_FRAME TrapFrame
32)
33
34/*++
35
36 Routine description:
37
38 This function is called by the kernel on data or instruction access faults.
39 - The access fault was detected due to:
40 An Access Violation.
41 A PTE with the present bit clear.
42 A Valid PTE with the Dirty bit and a write operation.
43
44 Note that the page fault could occur because of the Page Directory contents as well.
45
46 This routine determines what type of fault it is and calls the appropriate routine to handle or write the page fault.
47
48 Arguments:
49
50 [IN] FaultBits - The error code pushed by the CPU.
51 [IN] VirtualAddress - The Memory Address Referenced (CR2)
52 [IN] PreviousMode - Supplies the mode (kernel or user) where the fault occured.
53 [IN] TrapFrame - Trap information at fault.
54
55 Return Values:
56
57 MTSTATUS Code resulting in the status of fault handling operation.
58
59 Could be:
60 MT_SUCCESS -- Fault handled, return.
61 MT_ACCESS_VIOLATION -- User mode only (or kernel mode probing).
62 MT_GUARD_PAGE_VIOLATION -- Bugchecks.
63
64 The function would bugcheck if an invalid kernel mode access occured (or in worst case, 0 memory is available to fill the VAD of the user mode process, but I want to change it to sleep instead..)
65
66--*/
67
68{
69 // Declarations
70#ifdef DEBUG
71 PMMPTE ReferencedPml4e = MiGetPml4ePointer(VirtualAddress);
72 PMMPTE ReferencedPdpte = MiGetPdptePointer(VirtualAddress);
73 PMMPTE ReferencedPde = MiGetPdePointer(VirtualAddress);
74 UNREFERENCED_PARAMETER(ReferencedPml4e); UNREFERENCED_PARAMETER(ReferencedPdpte); UNREFERENCED_PARAMETER(ReferencedPde);
75#endif
76 PMMPTE ReferencedPte = MiGetPtePointer(VirtualAddress);
77 FAULT_OPERATION OperationDone = MiRetrieveOperationFromErrorCode(FaultBits);
78 IRQL PreviousIrql = MeGetCurrentIrql();
79
80#ifdef DEBUG
81 gop_printf(COLOR_RED, "Inside MmAccessFault | FaultBits: %llx | VirtualAddress: %p | PreviousMode: %d | TrapFrame->rip: %p | Operation: %d | Irql: %d\n", (unsigned long long)FaultBits, (void*)(uintptr_t)VirtualAddress, PreviousMode, (void*)(uintptr_t)TrapFrame->rip, OperationDone, PreviousIrql);
82#endif
83
84 if (!ReferencedPte) {
85 // If we cannot get the PTE for the VA, we raise access violation if its user mode, or bugcheck on kernel mode.
86 if (PreviousMode == UserMode) {
88 }
89
90 goto BugCheck;
91 }
92
93 // If the VA given isn't canonical (sign extended after bit 47, required by CPU MMU Laws), we return or bugcheck depending on the previous mode.
94 if (!MI_IS_CANONICAL_ADDR(VirtualAddress)) {
95
96 if (PreviousMode == UserMode) {
97 // User mode fault on non canonical address, not destructive.
99 }
100
101 // Kernel mode page fault on a non canonical address.
102 goto BugCheck;
103
104 }
105
106 // Check for NX. (NX on anywhere is invalid, no matter the range)
107 if (OperationDone == ExecuteOperation) {
108 // Fault on NX bit set page.
109 if (PreviousMode == UserMode) return MT_ACCESS_VIOLATION;
110 // Bugcheck, its kernel mode.
111 goto BugCheck;
112 }
113
114 // Now we check for each address in the system, and handle the request based on that.
115 if (VirtualAddress >= MmSystemRangeStart) {
116 if (PreviousMode == UserMode) {
117 // User mode access in kernel memory, invalid.
118 return MT_ACCESS_VIOLATION;
119 }
120
121 MMPTE TempPte = *ReferencedPte;
122
123 // If this is a guard page, we MUST NOT demand allocate it. (pre guard)
125 goto BugCheck;
126 }
127
128 // PTE Is present, but we got a fault.
129 if (TempPte.Hard.Present) {
130 // Write fault to read-only memory.
131 if ((OperationDone == WriteOperation) && (TempPte.Hard.Write == 0)) {
134 (void*)VirtualAddress,
135 (void*)ReferencedPte,
136 NULL,
137 NULL
138 );
139 }
140
141 // If we get here, it was an access/dirty update — set dirty bit if write
142 if (OperationDone == WriteOperation) {
143 // set dirty in PTE and, if needed, PFN->Dirty
144 // Prefer an atomic update: build NewPte = TempPte; NewPte.Hard.Dirty = 1; WriteValidPteAtomic(...)
145 MMPTE NewPte = TempPte;
146 NewPte.Hard.Dirty = 1;
147 MiAtomicExchangePte(ReferencedPte, NewPte.Value);
148 MiInvalidateTlbForVa((void*)VirtualAddress);
149 }
150 return MT_SUCCESS;
151 }
152
153 // Before any demand allocation, check IRQL.
154 if (PreviousIrql >= DISPATCH_LEVEL) {
155 // IRQL Isn't less than DISPATCH_LEVEL, so we cannot lazily allocate, since it would **block**.
158 (void*)VirtualAddress,
159 (void*)PreviousIrql,
160 (void*)OperationDone,
161 (void*)TrapFrame->rip
162 );
163 }
164
165
166 // PTE Isn't present, check for demand allocations.
167 if (MM_IS_DEMAND_ZERO_PTE(TempPte)) {
168 // Allocate a physical page for kernel demand-zero
170 if (pfn == PFN_ERROR) {
171 // out of memory.
172 goto BugCheck;
173 }
174
175 // Check protection mask.
176 uint64_t ProtectionFlags = PAGE_PRESENT;
177 ProtectionFlags |= (TempPte.Soft.SoftwareFlags & PROT_KERNEL_WRITE) ? PAGE_RW : 0;
178
179 // Write the PTE.
180 MI_WRITE_PTE(ReferencedPte, VirtualAddress, PPFN_TO_PHYSICAL_ADDRESS(INDEX_TO_PPFN(pfn)), ProtectionFlags);
181
182 return MT_SUCCESS;
183 }
184
185 // PTE Isn't present, and its a transition
186 if (TempPte.Soft.Transition == 1) {
187 // Retrieve the PFN Number written in the transition page.
188 PAGE_INDEX pfn = TempPte.Soft.PageFrameNumber;
189 if (!MiIsValidPfn(pfn)) goto BugCheck;
190
191 // Check the PFN, it has to be in the StandBy list and be equal to our PTE, if not, bugcheck.
192 PPFN_ENTRY PPfn = INDEX_TO_PPFN(pfn);
193 if (PPfn->State != PfnStateStandby || PPfn->Descriptor.Mapping.PteAddress == NULL || PPfn->Descriptor.Mapping.PteAddress != ReferencedPte) goto BugCheck;
194
195 // PFN Is matching to this pte, now we can allocate, finally.
196 // Check protection mask.
197 uint64_t ProtectionFlags = PAGE_PRESENT;
198 ProtectionFlags |= (TempPte.Soft.SoftwareFlags & PROT_KERNEL_WRITE) ? PAGE_RW : 0;
199
200 MI_WRITE_PTE(ReferencedPte, VirtualAddress, PFN_TO_PHYS(pfn), ProtectionFlags);
201
202 return MT_SUCCESS;
203 }
204
205 // TODO GRAB FROM PAGEFILE
206
207 // Unknown PTE format -> bugcheck (kernel space)
208 goto BugCheck;
209 }
210
211 // Address is below the kernel start, and above user address.
212 // This if statement should never pass, since these addresses are non canonical, and the first if statement checks for a non canonical adddres.
213 // basically kernel bloat this point.
214 // i removed it, bye bye.
215
216 // Address is in user range.
217 if (VirtualAddress <= MmHighestUserAddress) {
218 if (PreviousMode == KernelMode) {
219 // Kernel mode dereference on a user address
220 goto BugCheck;
221 }
222
223 // User mode fault on a user address, we check if there is a vad for it, if so, allocate the page.
224 PMMVAD vad = MiFindVad(PsGetCurrentProcess()->VadRoot, VirtualAddress);
225 if (!vad) return MT_ACCESS_VIOLATION;
226
227 // Looks like we have a valid vad, lets allocate.
229 if (pfn == PFN_ERROR) goto BugCheck; // TODO OOM
230
231 // Acquire the PTE for the faulty VA.
232 PMMPTE pte = MiGetPtePointer(VirtualAddress);
233
234 // Write the PTE.
235 MI_WRITE_PTE(pte, VirtualAddress, PFN_TO_PHYS(pfn), PAGE_PRESENT | PAGE_RW | PAGE_USER);
236
237 // Return success.
238 return MT_SUCCESS;
239 }
240
241 // Address, is, what... impossible!
242 // This comment means execution is impossible to reach here, as we sanitized all (valid) addresses in the 48bit paging hierarchy.
243 // If it does reach here, look below.
244
245BugCheck:
246 // TODO Check for NX.
247
248 // Check if its a guard page violation
249 if (ReferencedPte->Soft.SoftwareFlags & MI_GUARD_PAGE_PROTECTION) {
252 (void*)VirtualAddress,
253 (void*)MiRetrieveOperationFromErrorCode(TrapFrame->error_code),
254 (void*)TrapFrame->rip,
255 (void*)FaultBits
256 );
257 }
258
259 // Check if its a pool dereference (NonPagedPool first)
260 if (VirtualAddress >= MmNonPagedPoolStart && VirtualAddress <= MmNonPagedPoolEnd) {
263 (void*)VirtualAddress,
264 (void*)MiRetrieveOperationFromErrorCode(TrapFrame->error_code),
265 (void*)TrapFrame->rip,
266 (void*)FaultBits
267 );
268 }
269
270 // Check if Paged Pool Dereference. (the IRQL_NOT_LESS_OR_EQUAL bugcheck is up top)
271 if (VirtualAddress >= MmPagedPoolStart && VirtualAddress <= MmPagedPoolEnd) {
274 (void*)VirtualAddress,
275 (void*)MiRetrieveOperationFromErrorCode(TrapFrame->error_code),
276 (void*)TrapFrame->rip,
277 (void*)FaultBits
278 );
279 }
280
281 // Normal page fault.
284 (void*)VirtualAddress,
285 (void*)MiRetrieveOperationFromErrorCode(TrapFrame->error_code),
286 (void*)TrapFrame->rip,
287 (void*)FaultBits
288 );
289}
290
bool
MmInvalidAccessAllowed(
    void
)

/*++

    Routine description:
        (UNUSED, ALWAYS FALSE)
        This function determines if invalid access (e.g., a null pointer dereference) is allowed within the current context.

    Arguments:

        None.

    Return Values:

        True if invalid access is allowed, false otherwise.
        Currently always returns false.

    Notes:

        This routine is unused, but will be kept for future modifications if any.

--*/

{
    return false;
}
#define IN
Definition annotations.h:7
NORETURN void MeBugCheckEx(IN enum _BUGCHECK_CODES BugCheckCode, IN void *BugCheckParameter1, IN void *BugCheckParameter2, IN void *BugCheckParameter3, IN void *BugCheckParameter4)
Definition bugcheck.c:305
@ DISPATCH_LEVEL
Definition core.h:15
enum _IRQL IRQL
TRAP_FRAME * PTRAP_FRAME
Definition core.h:54
MTSTATUS MmAccessFault(IN uint64_t FaultBits, IN uint64_t VirtualAddress, IN PRIVILEGE_MODE PreviousMode, IN PTRAP_FRAME TrapFrame)
Definition fault.c:27
bool MmInvalidAccessAllowed(void)
Definition fault.c:292
void gop_printf(uint32_t color, const char *fmt,...)
Definition gop.c:694
#define UNREFERENCED_PARAMETER(x)
Definition intrin.h:24
PMMPTE MiGetPdptePointer(IN uintptr_t va)
Definition map.c:177
PMMPTE MiGetPdePointer(IN uintptr_t va)
Definition map.c:219
PMMPTE MiGetPml4ePointer(IN uintptr_t va)
Definition map.c:147
PMMPTE MiGetPtePointer(IN uintptr_t va)
Definition map.c:76
void MiInvalidateTlbForVa(IN void *VirtualAddress)
Definition map.c:273
@ ATTEMPTED_WRITE_TO_READONLY_MEMORY
Definition me.h:123
@ PAGE_FAULT
Definition me.h:83
@ PAGE_FAULT_IN_FREED_NONPAGED_POOL
Definition me.h:125
@ GUARD_PAGE_DEREFERENCE
Definition me.h:116
@ PAGE_FAULT_IN_FREED_PAGED_POOL
Definition me.h:126
@ IRQL_NOT_LESS_OR_EQUAL
Definition me.h:97
FORCEINLINE IRQL MeGetCurrentIrql(void)
Definition me.h:402
#define COLOR_RED
Colors definitions for easier access.
Definition mg.h:29
@ PAGE_RW
Definition mm.h:272
@ PAGE_USER
Definition mm.h:276
@ PAGE_PRESENT
Definition mm.h:268
#define MI_IS_CANONICAL_ADDR(va)
Definition mm.h:198
#define PFN_TO_PHYS(Pfn)
Definition mm.h:205
@ PfnStateFree
Definition mm.h:242
@ PfnStateZeroed
Definition mm.h:243
@ PfnStateStandby
Definition mm.h:240
FORCEINLINE void MiAtomicExchangePte(PMMPTE PtePtr, uint64_t NewPteValue)
Definition mm.h:651
FORCEINLINE bool MiIsValidPfn(IN PAGE_INDEX Pfn)
Definition mm.h:667
#define PROT_KERNEL_WRITE
Definition mm.h:212
enum _FAULT_OPERATION FAULT_OPERATION
struct _MMPTE * PMMPTE
FORCEINLINE FAULT_OPERATION MiRetrieveOperationFromErrorCode(uint64_t ErrorCode)
Definition mm.h:619
enum _PRIVILEGE_MODE PRIVILEGE_MODE
#define PPFN_TO_PHYSICAL_ADDRESS(PPFN)
Definition mm.h:117
uint64_t PAGE_INDEX
Definition mm.h:232
@ ExecuteOperation
Definition mm.h:329
@ WriteOperation
Definition mm.h:328
#define PFN_ERROR
Definition mm.h:208
struct _MMVAD * PMMVAD
#define INDEX_TO_PPFN(Index)
Definition mm.h:62
#define MI_WRITE_PTE(_PtePointer, _Va, _Pa, _Flags)
Definition mm.h:90
struct _PFN_ENTRY * PPFN_ENTRY
struct _MMPTE MMPTE
#define MM_IS_DEMAND_ZERO_PTE(pte)
Definition mm.h:120
#define MI_GUARD_PAGE_PROTECTION
Definition mm.h:221
@ KernelMode
Definition mm.h:333
@ UserMode
Definition mm.h:334
#define MT_SUCCESS
Definition mtstatus.h:22
int32_t MTSTATUS
Definition mtstatus.h:12
#define MT_ACCESS_VIOLATION
Definition mtstatus.h:129
PAGE_INDEX MiRequestPhysicalPage(IN PFN_STATE ListType)
Definition pfn.c:325
uintptr_t MmNonPagedPoolEnd
Definition pool.c:31
uintptr_t MmNonPagedPoolStart
Definition pool.c:30
uintptr_t MmPagedPoolStart
Definition pool.c:32
uintptr_t MmPagedPoolEnd
Definition pool.c:33
uintptr_t MmSystemRangeStart
Definition process.c:26
uintptr_t MmHighestUserAddress
Definition process.c:27
FORCEINLINE PEPROCESS PsGetCurrentProcess(void)
Definition ps.h:212
struct _MMPTE::@172372265215056352375070220246156106027174106113::@200357034104227323320222006243127050212100105247 Hard
uint64_t Dirty
Definition mm.h:398
uint64_t Write
Definition mm.h:393
uint64_t PageFrameNumber
Definition mm.h:404
uint64_t Present
Definition mm.h:392
uint64_t Transition
Definition mm.h:417
uint64_t Value
Definition mm.h:385
uint64_t SoftwareFlags
Definition mm.h:422
struct _MMPTE::@172372265215056352375070220246156106027174106113::@277354034164206104264133322054061025100052052376 Soft
union _PFN_ENTRY::@217024126340164016372152071216274230164113211246 Descriptor
struct _PFN_ENTRY::@217024126340164016372152071216274230164113211246::@301110335271023021153236134322146064331241142124 Mapping
uint8_t State
Definition mm.h:430
PMMPTE PteAddress
Definition mm.h:441
PMMVAD MiFindVad(IN PMMVAD Root, IN uintptr_t VirtualAddress)
Definition vad.c:354