kernel
Loading...
Searching...
No Matches
mmproc.c
Go to the documentation of this file.
/*++

Module Name:

    mmproc.c

Purpose:

    This translation unit contains the implementation of process-supporting memory management routines.

Author:

    slep (Matanel) 2025.

Revision History:

--*/
18
#include "../../includes/mm.h"
#include "../../includes/me.h"
#include "../../assert.h"
#include "../../includes/mg.h"
#include "../../includes/ps.h"
24
25void*
27 IN bool LargeStack
28)
29
30/*++
31
32 Routine description:
33
34 Creates a kernel stack for general use.
35 The stack cannot be accessible in user mode (assert(cpl == 0);)
36
37 Arguments:
38
39 [IN] bool LargeStack - Determines if the stack allocated should be MI_LARGE_STACK_SIZE bytes long. (default is MI_STACK_SIZE)
40
41 Return Values:
42
43 Pointer to top of the stack, or NULL on failure.
44
45 Notes:
46
47 The previous comment stated that it would return at the end of the guard page, which was incorrect when I went through my code.
48 This means you CAN emit the PUSH instruction, as it will also subtract space from the stack automatically (based on the pushed immediate).
49 But do not subtract too much (hit the guard page), or add to this pointer (hit the next page, could very well be unmapped, or a guard page of another thread)
50 - As you risk a page fault.
51
52--*/
53
54{
55 // Declarations
56 size_t StackSize = LargeStack ? MI_LARGE_STACK_SIZE : MI_STACK_SIZE;
57 size_t GuardSize = VirtualPageSize;
58 size_t TotalSize = StackSize + GuardSize;
59 size_t PagesToMap = BYTES_TO_PAGES(StackSize);
60
61 // Allocate VA range, the stack + guard page.
62 uintptr_t BaseVa = MiAllocatePoolVa(NonPagedPool, TotalSize);
63 if (!BaseVa) return NULL;
64
65 // Define where we actually start mapping, we skip the Guard page as we obviously dont want to map it.
66 uintptr_t MapStartVa = BaseVa + GuardSize;
67
68 size_t Iterations = 0;
69 bool failure = false;
70
71 for (size_t i = 0; i < PagesToMap; i++) {
72 // Calculate current VA to map
73 uintptr_t currVa = MapStartVa + (i * VirtualPageSize);
74
76
77 if (pfn == PFN_ERROR) {
78 failure = true;
79 break;
80 }
81
82 PMMPTE pte = MiGetPtePointer(currVa);
83 if (!pte) {
85 failure = true;
86 break;
87 }
88
89 // Map the Stack Page
90 MI_WRITE_PTE(pte, currVa, PFN_TO_PHYS(pfn), PAGE_PRESENT | PAGE_RW);
91 Iterations++;
92 }
93
94 PMMPTE GuardPte = MiGetPtePointer(BaseVa);
95
96 if (!GuardPte) {
97 // Now, I could continue and just not mark the GuardPte as a guard page, as it is only used in bugcheck
98 // debugging, but I want to make my debugging life easier.
99 // I don't even have a stable kernel debugger, so excuse me for the horrifying line im about to put below.
101 (void*)RETADDR(0),
102 (void*)BaseVa,
103 (void*)TotalSize,
104 (void*)123432 /* special identifier for manually initiated crash to know its here */
105 );
106 // If the bugcheck is ever removed, it would be a failure.
107 failure = true;
108
109 }
110
111 if (failure) goto failure_cleanup;
112
113 // Clean the PTE.
114 GuardPte->Value = 0;
115
116 // Set the Guard page bit in the GuardPte.
117 GuardPte->Hard.Present = 0;
119
120 // Invalidate the guard page VA.
121 MiInvalidateTlbForVa((void*)BaseVa);
122
123 // Return the TOP of the stack.
124 return (void*)(BaseVa + TotalSize);
125
126failure_cleanup:
127 // Unmap the pages we successfully mapped
128 for (size_t j = 0; j < Iterations; j++) {
129 uintptr_t vaToFree = MapStartVa + (j * VirtualPageSize);
130 PMMPTE pte = MiGetPtePointer(vaToFree);
131
132 if (pte && pte->Hard.Present) {
134 MiUnmapPte(pte);
136 }
137 }
138
139 // Free the VA reservation
140 if (BaseVa) {
141 MiFreePoolVaContiguous(BaseVa, TotalSize, NonPagedPool);
142 }
143
144 assert(false, "This function is currently a Must-Succeed.");
145 return NULL;
146}
147
148void
150 IN void* AllocatedStackTop,
151 IN bool LargeStack
152)
153
154/*++
155
156 Routine description:
157
158 Frees the stack given to a kernel thread.
159
160 Arguments:
161
162 [IN] void* AllocatedStackBase - The pointer given by MiCreateKernelStack
163 [IN] bool LargeStack - Signifies if the stack being deleted is a MI_LARGE_STACK_SIZE bytes long (true), or MI_STACK_SIZE bytes long (false)
164
165 Return Values:
166
167 None.
168
169--*/
170
171{
172 gop_printf(COLOR_PINK, "**Reached MiFreeKernelStack | LargeStack: %s | AllocatedStackTop: %p**\n", (LargeStack ? "True" : "False"), AllocatedStackTop);
173 // Declarations
174 size_t StackSize = LargeStack ? MI_LARGE_STACK_SIZE : MI_STACK_SIZE;
175 size_t GuardSize = VirtualPageSize;
176 size_t TotalSize = StackSize + GuardSize;
177 size_t PagesToUnMap = BYTES_TO_PAGES(StackSize);
178
179 // 1. Calculate the START of the stack memory (The highest valid byte addressable page)
180 // AllocatedStackTop is the byte *after* the stack end.
181 // We start at Top - PageSize.
182 uintptr_t CurrentVA = (uintptr_t)AllocatedStackTop - VirtualPageSize;
183
184 for (size_t i = 0; i < PagesToUnMap; i++) {
185 PMMPTE pte = MiGetPtePointer(CurrentVA);
186
187 if (pte && pte->Hard.Present) {
188
189 // Get its PFN that was allocated to it.
191
192 // Unmap the PTE.
193 MiUnmapPte(pte);
194
195 // Release the physical page back to the PFN DB.
197 }
198
199 // Move down to the next page
200 CurrentVA -= VirtualPageSize;
201 }
202
203 // The Guard Page is at the very bottom of the allocation.
204 uintptr_t BaseVa = (uintptr_t)AllocatedStackTop - TotalSize;
205
206 PMMPTE GuardPte = MiGetPtePointer(BaseVa);
207 if (GuardPte) {
208 assert((GuardPte->Soft.SoftwareFlags & MI_GUARD_PAGE_PROTECTION) != 0, "The guard page must have the GUARD_PAGE_PROTECTION bit set.");
209 // Clean the page.
210 GuardPte->Value = 0;
211 }
212
213 // Invalidate the VA for the Guard Page.
214 MiInvalidateTlbForVa((void*)BaseVa);
215
216 // Free the Virtual Address allocation
217 MiFreePoolVaContiguous(BaseVa, TotalSize, NonPagedPool);
218}
219
222 OUT void** DirectoryTable
223)
224
225/*++
226
227 Routine description:
228
229 Creates a new paging address space for the process.
230
231 Arguments:
232
233 [OUT] void** DirectoryTable - Pointer to set the newly physical address of the process's CR3.
234
235 Return Values:
236
237 None.
238
239--*/
240
241{
242 // Declarations
243 PAGE_INDEX pfnIndex;
244 uint64_t* pml4Base;
245 IRQL oldIrql;
246 uint64_t physicalAddress;
247
248 // Allocate a physical page for the PML4.
250
251 if (pfnIndex == PFN_ERROR) {
252 return MT_NO_RESOURCES;
253 }
254
255 // Convert the Index to a Physical Address (needed for CR3 and Recursive entry)
256 physicalAddress = PPFN_TO_PHYSICAL_ADDRESS(INDEX_TO_PPFN(pfnIndex));
257
258 // Map the physical page into hypermap so we can edit it temporarily.
259 pml4Base = (uint64_t*)MiMapPageInHyperspace(pfnIndex, &oldIrql);
260
261 if (!pml4Base) {
262 // If hyperspace mapping fails, release the page and fail.
263 MiReleasePhysicalPage(pfnIndex);
264 return MT_GENERAL_FAILURE;
265 }
266
267 // Copy Kernel Address Space.
268 // The higher half of memory (Kernel Space) is shared across all processes.
269 uint64_t* currentPml4 = pml4_from_recursive();
270
271 // This copies the PML4 from PhysicalMemoryOffset all the way ot the end of the 48bit addressing.
272 // Excluding user regions.
273 for (int i = MiConvertVaToPml4Offset(PhysicalMemoryOffset); i < 512; i++) {
274 pml4Base[i] = currentPml4[i];
275 }
276
277 MMPTE recursivePte;
278 kmemset(&recursivePte, 0, sizeof(MMPTE)); // Ensure clean start
279
280 // Note: We pass NULL for the VA as it's a self-ref, we only care about the PFN and Flags.
281 // Ensure PFN_TO_PHYS is used if MI_WRITE_PTE expects a physical address.
282 MI_WRITE_PTE(&recursivePte,
283 (void*)0,
284 PFN_TO_PHYS(pfnIndex),
286
287 // Write to index 0x1FF (511)
288 pml4Base[RECURSIVE_INDEX] = recursivePte.Value;
289
290 // Ensure it is stored.
292
293 // Unmap from Hyperspace.
294 MiUnmapHyperSpaceMap(oldIrql);
295
296 // Return the Physical Address.
297 // The scheduler will load this into CR3 when switching to this process.
298 *DirectoryTable = (void*)physicalAddress;
299
300 return MT_SUCCESS;
301}
302
static
void
MiFreePageTableHierarchy(
    IN PAGE_INDEX TablePfn,
    IN int Level
)

/*++

    Routine description:

        Recursively deletes the page tables in hierarchical order (pml4, pdpt, pde, pt),
        releasing each child table (and any mapped frame) back to the PFN database
        before releasing the table itself.

    Arguments:

        [IN] PAGE_INDEX TablePfn - The PFN of the table to tear down (the PML4 PFN to start from).
        [IN] int Level - The level to start from (4 = PML4, 1 = PT).

    Return Values:

        None.

    Notes:

        Each entry is read through a transient hyperspace mapping that is dropped
        before recursing, because hyperspace is reused by the recursive call.

--*/

{
    uint64_t* mapping;
    IRQL oldIrql;
    int limit = 512;
    int start = 0;

    // If this is the PML4:
    // We set the limit of removal to the PhysicalMemoryOffset (like the limit in MmCreateProcessAddressSpace)
    // So we dont remove kernel page tables, as we would cause a triple fault.
    if (Level == 4) {
        limit = MiConvertVaToPml4Offset(PhysicalMemoryOffset);
    }

    // Iterate through the indices.
    for (int i = start; i < limit; i++) {

        // Snapshot of the entry; defaults describe a non-present entry.
        PAGE_INDEX childPfn = PFN_ERROR;
        bool isPresent = false;
        bool isLargePage = false;

        // Map the table to read the entry at i
        mapping = (uint64_t*)MiMapPageInHyperspace(TablePfn, &oldIrql);
        MMPTE pte;
        pte.Value = mapping[i];

        if (pte.Hard.Present) {
            isPresent = true;
            childPfn = MiTranslatePteToPfn(&pte);

            // We dont support large pages yet (or we do and I didnt update this comment)
            // But we will scan for them anyway to prevent bugs in the future (faults and such)
            if (Level > 1 && (pte.Value & PAGE_PS)) {
                isLargePage = true;
            }
        }

        // Unmap immediately so we can use Hyperspace in the recursion
        MiUnmapHyperSpaceMap(oldIrql);

        // Process the entry if it was valid
        if (isPresent && childPfn != PFN_ERROR) {

            if (Level > 1) {
                if (isLargePage) {
                    // It's a 2MB or 1GB user page. Release the physical memory directly.
                    MiReleasePhysicalPage(childPfn);
                }
                else {
                    // It's a pointer to a lower-level page table. Recurse.
                    MiFreePageTableHierarchy(childPfn, Level - 1);
                }
            }
            else {
                // The PTs, the vad should have already freed them, but if it didnt, we do it.
                MiReleasePhysicalPage(childPfn);
            }
        }
    }

    // All children are freed, we can free the actual table now.
    MiReleasePhysicalPage(TablePfn);
}
389
392 IN PEPROCESS Process,
393 IN uintptr_t PageDirectoryPhysical
394)
395
396/*++
397
398 Routine description:
399
400 Deletes a process address space.
401
402 Arguments:
403
404 [IN] PEPROCESS Process - The process to delete the address space from.
405 [IN] uintptr_T PageDirectoryPhysical - Physical address of the process's address space. (CR3)
406
407 Return Values:
408
409 MTSTATUS Status code.
410
411--*/
412
413{
414 // Parameter check.
415 if (!Process || !PageDirectoryPhysical) {
416 return MT_INVALID_PARAM;
417 }
418
419 // Convert the physical address to its index.
420 PAGE_INDEX pml4Pfn = PHYS_TO_INDEX(PageDirectoryPhysical);
421
422 if (pml4Pfn == PFN_ERROR || !MiIsValidPfn(pml4Pfn)) {
423 return MT_INVALID_PARAM;
424 }
425
426 // Recursively tear down the page table.
427 MiFreePageTableHierarchy(pml4Pfn, 4);
428
429 // Flush CR3 across all processors.
430 MiReloadTLBs();
431
432 return MT_SUCCESS;
433}
434
437 IN PEPROCESS Process,
438 OUT void** OutStackTop,
439 _In_Opt size_t StackReserveSize
440)
441
442/*++
443
444 Routine description:
445
446 Creates a stack for a user thread in the process address space (with a guard page below)
447
448 Arguments:
449
450 [IN] PEPROCESS Process - The thread's process.
451 [OUT] void** OutStackTop - Top of stack allocated if successful.
452 [IN OPTIONAL] size_t StackReserveSize - A value that indicates how much data to reserve for the stack. If not supplied, MI_DEFAULT_USER_STACK_SIZE is used.
453
454 Return Values:
455
456 MTSTATUS Status code.
457
458 Notes:
459
460 If a process allocated too much virtual memory, his next allocation could at Process->NextStackHint
461 Which means, the Status will return MT_CONFLICTING_ADDRESSES, which means thread creation failure.
462
463--*/
464
465{
466 // If no stack reserve size, we use the default
467 if (!StackReserveSize) StackReserveSize = MI_DEFAULT_USER_STACK_SIZE;
468
469 // Acquire the exclusive push lock for the stack.
470 MsAcquirePushLockExclusive(&Process->AddressSpaceLock);
471
472 // Grab current hint.
473 uintptr_t CurrentStackHint = Process->NextStackHint;
474
475 // Compute the end of the stack.
476 uintptr_t EndOfStack = CurrentStackHint - StackReserveSize;
477
478 // Allocate a VAD for the address space.
479 MTSTATUS Status = MmAllocateVirtualMemory(Process, (void**) & EndOfStack, StackReserveSize, VAD_FLAG_WRITE | VAD_FLAG_READ);
480 if (MT_FAILURE(Status)) goto Cleanup;
481
482 // Create a VAD for the guard page (reserved)
483 void* GuardPageEnd = (void*)(EndOfStack - VirtualPageSize);
484 Status = MmAllocateVirtualMemory(Process, (void**)&GuardPageEnd, VirtualPageSize, VAD_FLAG_RESERVED | VAD_FLAG_GUARD_PAGE);
485 if (MT_FAILURE(Status)) goto CleanupWithVad;
486
487 // The next hint should be the end of the guard page.
488 Process->NextStackHint = (uintptr_t)GuardPageEnd;
489 // Success.
490 if (OutStackTop) *OutStackTop = (void*)CurrentStackHint;
491 goto Cleanup;
492
493CleanupWithVad:
494 MmFreeVirtualMemory(Process, (void*)EndOfStack);
495
496Cleanup:
497 MsReleasePushLockExclusive(&Process->AddressSpaceLock);
498 return Status;
499}
500
503 IN PEPROCESS Process,
504 OUT void** OutPeb,
505 OUT void** OutBasicMtdllTypes
506)
507
508{
509 // For now all this does is allocate memory really.
510 void* BaseAddress = NULL;
511 MTSTATUS Status = MmAllocateVirtualMemory(Process, &BaseAddress, sizeof(PEB), VAD_FLAG_WRITE | VAD_FLAG_READ);
512
513 // Kernel mode memory (struct), no need for try and attaching.
514 PPEB* Peb = (PPEB*)OutPeb;
515 if (MT_SUCCEEDED(Status)) {
516 *Peb = BaseAddress;
517 }
518 else {
519 return Status;
520 }
521
522 // Set Process->Peb (TODO)
523
524 BaseAddress = NULL;
525 Status = MmAllocateVirtualMemory(Process, &BaseAddress, sizeof(MTDLL_BASIC_TYPES), VAD_FLAG_WRITE | VAD_FLAG_READ);
526
527 PMTDLL_BASIC_TYPES* PBasictypes = (PMTDLL_BASIC_TYPES*)OutBasicMtdllTypes;
528 if (MT_SUCCEEDED(Status)) {
529 *PBasictypes = BaseAddress;
530 }
531
532 return Status;
533}
534
537 IN PETHREAD Thread,
538 OUT void** OutTeb
539)
540
541{
542 // Allocate memory for the TEB.
543 void* BaseAddress = NULL;
544 MTSTATUS Status = MmAllocateVirtualMemory(Thread->ParentProcess, &BaseAddress, sizeof(TEB), VAD_FLAG_WRITE | VAD_FLAG_READ);
545
546 PTEB* Teb = (PTEB*)OutTeb;
547 if (MT_SUCCEEDED(Status)) {
548 *Teb = BaseAddress;
549 }
550
551 // Set Thread->InternalThread->Teb (TODO)
552
553 return Status;
554}
#define _In_Opt
Definition annotations.h:10
#define IN
Definition annotations.h:8
#define OUT
Definition annotations.h:9
#define assert(...)
Definition assert.h:57
NORETURN void MeBugCheckEx(IN enum _BUGCHECK_CODES BugCheckCode, IN void *BugCheckParameter1, IN void *BugCheckParameter2, IN void *BugCheckParameter3, IN void *BugCheckParameter4)
Definition bugcheck.c:245
enum _IRQL IRQL
EPROCESS * PEPROCESS
Definition core.h:52
ETHREAD * PETHREAD
Definition core.h:44
void gop_printf(uint32_t color, const char *fmt,...)
Definition gop.c:633
void * MiMapPageInHyperspace(IN uint64_t PfnIndex, OUT PIRQL OldIrql)
Definition hypermap.c:37
void MiUnmapHyperSpaceMap(IN IRQL OldIrql)
Definition hypermap.c:95
#define RETADDR(level)
Definition macros.h:53
void MiReloadTLBs(void)
Definition map.c:499
uint64_t * pml4_from_recursive(void)
Definition map.c:31
PAGE_INDEX MiTranslatePteToPfn(IN PMMPTE pte)
Definition map.c:310
PMMPTE MiGetPtePointer(IN uintptr_t va)
Definition map.c:76
void MiInvalidateTlbForVa(IN void *VirtualAddress)
Definition map.c:273
void MiUnmapPte(IN PMMPTE pte)
Definition map.c:385
@ MANUALLY_INITIATED_CRASH
Definition me.h:90
#define COLOR_PINK
Definition mg.h:44
@ PAGE_RW
Definition mm.h:311
@ PAGE_PRESENT
Definition mm.h:307
@ PAGE_PS
Definition mm.h:335
#define PFN_TO_PHYS(Pfn)
Definition mm.h:226
@ PfnStateZeroed
Definition mm.h:280
@ NonPagedPool
Definition mm.h:355
FORCEINLINE bool MiIsValidPfn(IN PAGE_INDEX Pfn)
Definition mm.h:773
@ VAD_FLAG_READ
Definition mm.h:296
@ VAD_FLAG_RESERVED
Definition mm.h:302
@ VAD_FLAG_GUARD_PAGE
Definition mm.h:303
@ VAD_FLAG_WRITE
Definition mm.h:297
#define MI_LARGE_STACK_SIZE
Definition mm.h:243
#define RECURSIVE_INDEX
Definition mm.h:57
struct _MMPTE * PMMPTE
#define MmFullBarrier()
Definition mm.h:250
#define PPFN_TO_PHYSICAL_ADDRESS(PPFN)
Definition mm.h:136
uint64_t PAGE_INDEX
Definition mm.h:256
FORCEINLINE void * kmemset(void *dest, int64_t val, uint64_t len)
Definition mm.h:655
#define MI_DEFAULT_USER_STACK_SIZE
Definition mm.h:245
#define PhysicalMemoryOffset
Definition mm.h:56
#define PHYS_TO_INDEX(PhysicalAddress)
Definition mm.h:227
#define BYTES_TO_PAGES(Bytes)
Definition mm.h:167
#define PFN_ERROR
Definition mm.h:229
#define VirtualPageSize
Definition mm.h:53
#define INDEX_TO_PPFN(Index)
Definition mm.h:62
#define MI_WRITE_PTE(_PtePointer, _Va, _Pa, _Flags)
Definition mm.h:90
struct _MMPTE MMPTE
#define MI_GUARD_PAGE_PROTECTION
Definition mm.h:244
#define MI_STACK_SIZE
Definition mm.h:242
void * MiCreateKernelStack(IN bool LargeStack)
Definition mmproc.c:26
MTSTATUS MmCreateProcessAddressSpace(OUT void **DirectoryTable)
Definition mmproc.c:221
MTSTATUS MmCreateUserStack(IN PEPROCESS Process, OUT void **OutStackTop, _In_Opt size_t StackReserveSize)
Definition mmproc.c:436
MTSTATUS MmDeleteProcessAddressSpace(IN PEPROCESS Process, IN uintptr_t PageDirectoryPhysical)
Definition mmproc.c:391
MTSTATUS MmCreateTeb(IN PETHREAD Thread, OUT void **OutTeb)
Definition mmproc.c:536
void MiFreeKernelStack(IN void *AllocatedStackTop, IN bool LargeStack)
Definition mmproc.c:149
MTSTATUS MmCreatePeb(IN PEPROCESS Process, OUT void **OutPeb, OUT void **OutBasicMtdllTypes)
Definition mmproc.c:502
#define MT_SUCCESS
Definition mtstatus.h:22
#define MT_GENERAL_FAILURE
Definition mtstatus.h:31
#define MT_FAILURE(Status)
Definition mtstatus.h:16
#define MT_INVALID_PARAM
Definition mtstatus.h:24
int32_t MTSTATUS
Definition mtstatus.h:12
#define MT_SUCCEEDED(Status)
Macros to test status.
Definition mtstatus.h:15
#define MT_NO_RESOURCES
Definition mtstatus.h:32
PAGE_INDEX MiRequestPhysicalPage(IN PFN_STATE ListType)
Definition pfn.c:333
NOINLINE void MiReleasePhysicalPage(IN PAGE_INDEX PfnIndex)
Definition pfn.c:441
struct _MTDLL_BASIC_TYPES * PMTDLL_BASIC_TYPES
struct _TEB * PTEB
struct _PEB * PPEB
struct _TEB TEB
struct _MTDLL_BASIC_TYPES MTDLL_BASIC_TYPES
struct _PEB PEB
void MsAcquirePushLockExclusive(IN PUSH_LOCK *Lock)
Definition pushlock.c:80
void MsReleasePushLockExclusive(IN PUSH_LOCK *Lock)
Definition pushlock.c:99
struct _MMPTE::@172372265215056352375070220246156106027174106113::@200357034104227323320222006243127050212100105247 Hard
uint64_t Present
Definition mm.h:430
uint64_t Value
Definition mm.h:423
uint64_t SoftwareFlags
Definition mm.h:460
struct _MMPTE::@172372265215056352375070220246156106027174106113::@277354034164206104264133322054061025100052052376 Soft
uintptr_t MiAllocatePoolVa(IN POOL_TYPE PoolType, IN size_t NumberOfBytes)
Definition va.c:213
void MiFreePoolVaContiguous(IN uintptr_t va, IN size_t NumberOfBytes, IN POOL_TYPE PoolType)
Definition va.c:367
MTSTATUS MmFreeVirtualMemory(IN PEPROCESS Process, IN void *BaseAddress)
Definition vad.c:871
MTSTATUS MmAllocateVirtualMemory(IN PEPROCESS Process, _In_Opt _Out_Opt void **BaseAddress, IN size_t NumberOfBytes, IN VAD_FLAGS VadFlags)
Definition vad.c:740