mmproc.c
/*++

Module Name:

    mmproc.c

Purpose:

    This translation unit implements the process-support memory management routines.

Author:

    slep (Matanel) 2025.

Revision History:

--*/

#include "../../includes/mm.h"
#include "../../includes/me.h"
#include "../../assert.h"
#include "../../includes/mg.h"

void*
MiCreateKernelStack(
    IN bool LargeStack
)

/*++

    Routine Description:

        Creates a kernel stack for general use.
        The stack is not accessible from user mode (assert(cpl == 0);).

    Arguments:

        [IN] bool LargeStack - Determines whether the allocated stack is MI_LARGE_STACK_SIZE bytes long (the default is MI_STACK_SIZE).

    Return Values:

        Pointer to the top of the stack, or NULL on failure.

    Notes:

        An earlier version of this comment stated that the routine returns a pointer at the end of the guard page; going through the code showed that is incorrect.
        This means you CAN emit a PUSH immediately, as PUSH subtracts from the stack pointer automatically (by the size of the pushed operand) before storing.
        But do not subtract too much (you would hit the guard page), and do not add to this pointer (you would hit the next page, which could very well be unmapped, or be the guard page of another thread)
        - as you risk a page fault either way.

--*/

{
    // Declarations
    size_t StackSize = LargeStack ? MI_LARGE_STACK_SIZE : MI_STACK_SIZE;
    size_t GuardSize = VirtualPageSize;
    size_t TotalSize = StackSize + GuardSize;
    size_t PagesToMap = BYTES_TO_PAGES(StackSize);

    // Allocate the VA range: the stack plus the guard page.
    uintptr_t BaseVa = MiAllocatePoolVa(NonPagedPool, TotalSize);
    if (!BaseVa) return NULL;

    // Define where we actually start mapping; we skip the guard page, as we obviously don't want to map it.
    uintptr_t MapStartVa = BaseVa + GuardSize;

    size_t Iterations = 0;
    bool failure = false;

    for (size_t i = 0; i < PagesToMap; i++) {
        // Calculate the current VA to map.
        uintptr_t currVa = MapStartVa + (i * VirtualPageSize);

        // Request a physical page to back it.
        PAGE_INDEX pfn = MiRequestPhysicalPage(PfnStateZeroed);

        if (pfn == PFN_ERROR) {
            failure = true;
            break;
        }

        PMMPTE pte = MiGetPtePointer(currVa);
        if (!pte) {
            MiReleasePhysicalPage(pfn);
            failure = true;
            break;
        }

        // Map the stack page.
        MI_WRITE_PTE(pte, currVa, PFN_TO_PHYS(pfn), PAGE_PRESENT | PAGE_RW);
        Iterations++;
    }

    PMMPTE GuardPte = MiGetPtePointer(BaseVa);

    if (!GuardPte) {
        // Now, I could continue and just not mark the GuardPte as a guard page, as it is only used in bugcheck
        // debugging, but I want to make my debugging life easier.
        // I don't even have a stable kernel debugger, so excuse me for the horrifying line I'm about to put below.
        MeBugCheckEx(MANUALLY_INITIATED_CRASH,
            (void*)RETADDR(0),
            (void*)BaseVa,
            (void*)TotalSize,
            (void*)123432 /* special identifier for a manually initiated crash, so we know it came from here */
        );
        // If the bugcheck is ever removed, this path is a failure.
        failure = true;
    }

    if (failure) goto failure_cleanup;

    // Clean the PTE.
    GuardPte->Value = 0;

    // Set the guard-page protection bit in the GuardPte.
    GuardPte->Hard.Present = 0;
    GuardPte->Soft.SoftwareFlags |= MI_GUARD_PAGE_PROTECTION;

    // Invalidate the guard page VA.
    MiInvalidateTlbForVa((void*)BaseVa);

    // Return the TOP of the stack.
    return (void*)(BaseVa + TotalSize);

failure_cleanup:
    // Unmap the pages we successfully mapped.
    for (size_t j = 0; j < Iterations; j++) {
        uintptr_t vaToFree = MapStartVa + (j * VirtualPageSize);
        PMMPTE pte = MiGetPtePointer(vaToFree);

        if (pte && pte->Hard.Present) {
            PAGE_INDEX pfn = MiTranslatePteToPfn(pte);
            MiUnmapPte(pte);
            MiReleasePhysicalPage(pfn);
        }
    }

    // Free the VA reservation.
    if (BaseVa) {
        MiFreePoolVaContiguous(BaseVa, TotalSize, NonPagedPool);
    }

    assert(false, "This function is currently a Must-Succeed.");
    return NULL;
}

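/*
    Illustrative sketch only (not part of the build): how a caller might consume
    the pointer returned by MiCreateKernelStack, per the layout built above.
    EXAMPLE_THREAD and the two Example* functions are hypothetical names used
    purely for illustration; only MiCreateKernelStack/MiFreeKernelStack are real.

        [BaseVa, BaseVa + GuardSize)             guard page  (not present)
        [BaseVa + GuardSize, BaseVa + TotalSize) stack pages (PAGE_PRESENT | PAGE_RW)
        returned pointer = BaseVa + TotalSize    (one past the highest mapped byte)
*/
#if 0
typedef struct _EXAMPLE_THREAD {    // hypothetical, for illustration only
    void* InitialStack;
    bool  LargeStack;
} EXAMPLE_THREAD;

static bool ExampleCreateThreadStack(EXAMPLE_THREAD* Thread, bool LargeStack)
{
    void* StackTop = MiCreateKernelStack(LargeStack);
    if (!StackTop) {
        return false;
    }

    // A new thread's RSP may be set to StackTop directly: PUSH pre-decrements
    // RSP before storing, so the first push lands in the highest mapped qword.
    Thread->InitialStack = StackTop;
    Thread->LargeStack = LargeStack;
    return true;
}

static void ExampleDeleteThreadStack(EXAMPLE_THREAD* Thread)
{
    // Must pass back the exact pointer MiCreateKernelStack returned.
    MiFreeKernelStack(Thread->InitialStack, Thread->LargeStack);
    Thread->InitialStack = NULL;
}
#endif
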
void
MiFreeKernelStack(
    IN void* AllocatedStackTop,
    IN bool LargeStack
)

/*++

    Routine Description:

        Frees the stack given to a kernel thread.

    Arguments:

        [IN] void* AllocatedStackTop - The pointer returned by MiCreateKernelStack.
        [IN] bool LargeStack - Signifies whether the stack being deleted is MI_LARGE_STACK_SIZE bytes long (true) or MI_STACK_SIZE bytes long (false).

    Return Values:

        None.

--*/

{
    gop_printf(COLOR_PINK, "**Reached MiFreeKernelStack | LargeStack: %s | AllocatedStackTop: %p**\n", (LargeStack ? "True" : "False"), AllocatedStackTop);
    // Declarations
    size_t StackSize = LargeStack ? MI_LARGE_STACK_SIZE : MI_STACK_SIZE;
    size_t GuardSize = VirtualPageSize;
    size_t TotalSize = StackSize + GuardSize;
    size_t PagesToUnMap = BYTES_TO_PAGES(StackSize);

    // 1. Calculate the start of the stack memory (the highest valid byte-addressable page).
    // AllocatedStackTop is the byte *after* the stack end,
    // so we start at Top - PageSize.
    uintptr_t CurrentVA = (uintptr_t)AllocatedStackTop - VirtualPageSize;

    for (size_t i = 0; i < PagesToUnMap; i++) {
        PMMPTE pte = MiGetPtePointer(CurrentVA);

        if (pte && pte->Hard.Present) {

            // Get the PFN that was allocated to it.
            PAGE_INDEX pfn = MiTranslatePteToPfn(pte);

            // Unmap the PTE.
            MiUnmapPte(pte);

            // Release the physical page back to the PFN DB.
            MiReleasePhysicalPage(pfn);
        }

        // Move down to the next page.
        CurrentVA -= VirtualPageSize;
    }

    // The guard page is at the very bottom of the allocation.
    uintptr_t BaseVa = (uintptr_t)AllocatedStackTop - TotalSize;

    PMMPTE GuardPte = MiGetPtePointer(BaseVa);
    if (GuardPte) {
        assert((GuardPte->Soft.SoftwareFlags & MI_GUARD_PAGE_PROTECTION) != 0, "The guard page must have the GUARD_PAGE_PROTECTION bit set.");
        // Clean the page.
        GuardPte->Value = 0;
    }

    // Invalidate the VA for the guard page.
    MiInvalidateTlbForVa((void*)BaseVa);

    // Free the virtual address allocation.
    MiFreePoolVaContiguous(BaseVa, TotalSize, NonPagedPool);
}
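
/*
    Worked example of the address math above, assuming VirtualPageSize == 0x1000
    and MI_STACK_SIZE == 4 * 0x1000 (illustrative values; see mm.h for the real
    ones). For AllocatedStackTop == 0xFFFF800000016000 and LargeStack == false:

        TotalSize = StackSize + GuardSize          = 0x5000
        BaseVa    = AllocatedStackTop - TotalSize  = 0xFFFF800000011000  (guard page)
        First VA the loop unmaps = Top - 0x1000    = 0xFFFF800000015000
        Last VA the loop unmaps  = BaseVa + 0x1000 = 0xFFFF800000012000

    So the loop walks the four mapped pages top-down and never touches the
    guard page, which is cleaned separately afterwards.
*/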

MTSTATUS
MmCreateProcessAddressSpace(
    OUT void** DirectoryTable
)

/*++

    Routine Description:

        Creates a new paging address space for a process.

    Arguments:

        [OUT] void** DirectoryTable - Receives the physical address of the process's new top-level page table (the value its CR3 will hold).

    Return Values:

        MT_SUCCESS on success, MT_NO_RESOURCES if no physical page is available, or MT_GENERAL_FAILURE if the hyperspace mapping fails.

--*/

{
    // Declarations
    PAGE_INDEX pfnIndex;
    uint64_t* pml4Base;
    IRQL oldIrql;
    uint64_t physicalAddress;

    // Allocate a zeroed physical page for the PML4.
    pfnIndex = MiRequestPhysicalPage(PfnStateZeroed);

    if (pfnIndex == PFN_ERROR) {
        return MT_NO_RESOURCES;
    }

    // Convert the index to a physical address (needed for CR3 and the recursive entry).
    physicalAddress = PPFN_TO_PHYSICAL_ADDRESS(INDEX_TO_PPFN(pfnIndex));

    // Map the physical page into hyperspace so we can edit it temporarily.
    pml4Base = (uint64_t*)MiMapPageInHyperspace(pfnIndex, &oldIrql);

    if (!pml4Base) {
        // If hyperspace mapping fails, release the page and fail.
        MiReleasePhysicalPage(pfnIndex);
        return MT_GENERAL_FAILURE;
    }

    // Copy the kernel address space.
    // The higher half of memory (kernel space) is shared across all processes.
    uint64_t* currentPml4 = pml4_from_recursive();

    // This copies the PML4 entries from PhysicalMemoryOffset all the way to the end of 48-bit addressing,
    // excluding user regions.
    for (int i = MiConvertVaToPml4Offset(PhysicalMemoryOffset); i < 512; i++) {
        pml4Base[i] = currentPml4[i];
    }

    MMPTE recursivePte;
    kmemset(&recursivePte, 0, sizeof(MMPTE)); // Ensure a clean start

    // Note: We pass NULL for the VA as it's a self-reference; we only care about the PFN and flags.
    // Ensure PFN_TO_PHYS is used, since MI_WRITE_PTE expects a physical address.
    MI_WRITE_PTE(&recursivePte,
        (void*)0,
        PFN_TO_PHYS(pfnIndex),
        PAGE_PRESENT | PAGE_RW);

    // Write to index 0x1FF (511).
    pml4Base[RECURSIVE_INDEX] = recursivePte.Value;

    // Ensure it is stored.
    MmFullBarrier();

    // Unmap from hyperspace.
    MiUnmapHyperSpaceMap(oldIrql);

    // Return the physical address.
    // The scheduler will load this into CR3 when switching to this process.
    *DirectoryTable = (void*)physicalAddress;

    return MT_SUCCESS;
}
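
/*
    Sketch of why the RECURSIVE_INDEX entry above works (illustrative, not
    compiled; ExampleRecursivePml4Va is a hypothetical helper). With
    pml4Base[511] pointing at the PML4's own physical page, a virtual address
    whose four 9-bit table indices are all 511 walks back to the PML4 itself,
    which is what pml4_from_recursive() relies on once this CR3 is loaded.
*/
#if 0
static uint64_t* ExampleRecursivePml4Va(void)
{
    // Canonical x86-64 4-level layout: sign extension | PML4 idx | PDPT idx |
    // PD idx | PT idx | page offset. With every index = 511 and offset = 0:
    uint64_t va = 0xFFFFull << 48;      // sign-extended high bits
    va |= (uint64_t)511 << 39;          // PML4 index
    va |= (uint64_t)511 << 30;          // PDPT index
    va |= (uint64_t)511 << 21;          // PD index
    va |= (uint64_t)511 << 12;          // PT index
    return (uint64_t*)va;               // == 0xFFFFFFFFFFFFF000
}
#endif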