kernel
Loading...
Searching...
No Matches
map.c
Go to the documentation of this file.
1/*++
2
3Module Name:
4
5 map.c
6
7Purpose:
8
9 This translation unit contains the implementation of the internal mapping functions for kernel use.
10
11Author:
12
13 slep (Matanel) 2025.
14
15Revision History:
16
17--*/
18
19#include "../../includes/mm.h"
20#include "../../includes/mh.h"
21#include "../../assert.h"
22
/* Sign-extend bit 47 into bits 48..63 so the address is canonical on x86-64. */
static inline uint64_t canonical_high(uint64_t addr) {
    const uint64_t sign_bit = 1ULL << 47;
    return (addr & sign_bit) ? (addr | 0xFFFF000000000000ULL) : addr;
}
30
31uint64_t* pml4_from_recursive(void) {
32 uint64_t va = ((uint64_t)RECURSIVE_INDEX << 39) |
33 ((uint64_t)RECURSIVE_INDEX << 30) |
34 ((uint64_t)RECURSIVE_INDEX << 21) |
35 ((uint64_t)RECURSIVE_INDEX << 12);
36 va = canonical_high(va);
37 return (uint64_t*)(uintptr_t)va;
38}
39
40static inline uint64_t* pdpt_from_recursive(size_t pml4_i) {
41 uint64_t va = ((uint64_t)RECURSIVE_INDEX << 39) |
42 ((uint64_t)RECURSIVE_INDEX << 30) |
43 ((uint64_t)RECURSIVE_INDEX << 21) |
44 ((uint64_t)pml4_i << 12); // <-- CORRECTED
45 va = canonical_high(va);
46 return (uint64_t*)(uintptr_t)va;
47}
48
49// To get PD page for pml4_i, pdpt_i
50static inline uint64_t* pd_from_recursive(size_t pml4_i, size_t pdpt_i) {
51 uint64_t va = ((uint64_t)RECURSIVE_INDEX << 39) |
52 ((uint64_t)RECURSIVE_INDEX << 30) |
53 ((uint64_t)pml4_i << 21) | // <-- CORRECTED
54 ((uint64_t)pdpt_i << 12); // <-- CORRECTED
55 va = canonical_high(va);
56 return (uint64_t*)(uintptr_t)va;
57}
58
59// To get PT page for pml4_i, pdpt_i, pd_i
60static inline uint64_t* pt_from_recursive(size_t pml4_i, size_t pdpt_i, size_t pd_i) {
61 uint64_t va = ((uint64_t)RECURSIVE_INDEX << 39) |
62 ((uint64_t)pml4_i << 30) |
63 ((uint64_t)pdpt_i << 21) |
64 ((uint64_t)pd_i << 12);
65 va = canonical_high(va);
66 return (uint64_t*)(uintptr_t)va;
67}
68
69// Extract indices from virtual address
/* Return the 9-bit paging-structure index located at bit `shift` of a VA. */
static inline size_t va_level_index(uint64_t va, unsigned shift) {
    return (size_t)((va >> shift) & 0x1FF);
}

static inline size_t get_pml4_index(uint64_t va) { return va_level_index(va, 39); }
static inline size_t get_pdpt_index(uint64_t va) { return va_level_index(va, 30); }
static inline size_t get_pd_index(uint64_t va)   { return va_level_index(va, 21); }
static inline size_t get_pt_index(uint64_t va)   { return va_level_index(va, 12); }
74
77 IN uintptr_t va
78)
79
80/*++
81
82 Routine description : Retrieves the pointer to the PTE from the virtual address given
83
84 Arguments:
85
86 [IN] Virtual Address.
87
88 Return Values:
89
90 Pointer to PTE associated with the Virtual Address. (NULL if out of memory)
91
92--*/
93
94{
95 // 1. Calculate Indices
96 size_t pml4_i = get_pml4_index(va);
97 size_t pdpt_i = get_pdpt_index(va);
98 size_t pd_i = get_pd_index(va);
99 size_t pt_i = get_pt_index(va);
100
101 uint64_t intermediateFlags = PAGE_PRESENT | PAGE_RW;
102
103 // If we are touching user address space, we add user accessibility.
104 if (va <= MmHighestUserAddress) {
105 intermediateFlags |= PAGE_USER;
106 }
107
108 uint64_t* pml4_va = pml4_from_recursive();
109 if (!(pml4_va[pml4_i] & PAGE_PRESENT)) {
110 // Allocate a new PDPT
112 if (pfn == PFN_ERROR) return NULL;
113
114 // We are modifying the recursive mapping of the PML4 entry.
115 PMMPTE pml4e = (PMMPTE)&pml4_va[pml4_i];
116 MI_WRITE_PTE(pml4e, pdpt_from_recursive(pml4_i), PFN_TO_PHYS(pfn), intermediateFlags);
117 }
118
119 uint64_t* pdpt_va = pdpt_from_recursive(pml4_i);
120 if (!(pdpt_va[pdpt_i] & PAGE_PRESENT)) {
121 // Allocate a new Page Directory
123 if (pfn == PFN_ERROR) return NULL;
124
125 // Link new PD into PDPT
126 PMMPTE pdpte = (PMMPTE)&pdpt_va[pdpt_i];
127 MI_WRITE_PTE(pdpte, pd_from_recursive(pml4_i, pdpt_i), PFN_TO_PHYS(pfn), intermediateFlags);
128 }
129
130 uint64_t* pd_va = pd_from_recursive(pml4_i, pdpt_i);
131 if (!(pd_va[pd_i] & PAGE_PRESENT)) {
132 // Allocate a new Page Table
134 if (pfn == PFN_ERROR) return NULL;
135
136 // Link new PT into PD
137 PMMPTE pde = (PMMPTE)&pd_va[pd_i];
138 MI_WRITE_PTE(pde, pt_from_recursive(pml4_i, pdpt_i, pd_i), PFN_TO_PHYS(pfn), intermediateFlags);
139 }
140
141 // Return addr of PTE.
142 uint64_t* pt_va = pt_from_recursive(pml4_i, pdpt_i, pd_i);
143 return (PMMPTE)&pt_va[pt_i];
144}
145
146PMMPTE
148 IN uintptr_t va
149)
150
151{
152 // 1. Calculate Indices
153 size_t pml4_i = get_pml4_index(va);
154
155 uint64_t intermediateFlags = PAGE_PRESENT | PAGE_RW;
156
157 // If we are touching user address space, we add user accessibility.
158 if (va <= MmHighestUserAddress) {
159 intermediateFlags |= PAGE_USER;
160 }
161
162 uint64_t* pml4_va = pml4_from_recursive();
163 if (!(pml4_va[pml4_i] & PAGE_PRESENT)) {
164 // Allocate a new PDPT
166 if (pfn == PFN_ERROR) return NULL;
167
168 // We are modifying the recursive mapping of the PML4 entry.
169 PMMPTE pml4e = (PMMPTE)&pml4_va[pml4_i];
170 MI_WRITE_PTE(pml4e, pdpt_from_recursive(pml4_i), PFN_TO_PHYS(pfn), intermediateFlags);
171 }
172
173 return (PMMPTE) & pml4_va[pml4_i];
174}
175
176PMMPTE
178 IN uintptr_t va
179)
180
181{
182 // 1. Calculate Indices
183 size_t pml4_i = get_pml4_index(va);
184 size_t pdpt_i = get_pdpt_index(va);
185
186 uint64_t intermediateFlags = PAGE_PRESENT | PAGE_RW;
187
188 // If we are touching user address space, we add user accessibility.
189 if (va <= MmHighestUserAddress) {
190 intermediateFlags |= PAGE_USER;
191 }
192
193 uint64_t* pml4_va = pml4_from_recursive();
194 if (!(pml4_va[pml4_i] & PAGE_PRESENT)) {
195 // Allocate a new PDPT
197 if (pfn == PFN_ERROR) return NULL;
198
199 // We are modifying the recursive mapping of the PML4 entry.
200 PMMPTE pml4e = (PMMPTE)&pml4_va[pml4_i];
201 MI_WRITE_PTE(pml4e, pdpt_from_recursive(pml4_i), PFN_TO_PHYS(pfn), intermediateFlags);
202 }
203
204 uint64_t* pdpt_va = pdpt_from_recursive(pml4_i);
205 if (!(pdpt_va[pdpt_i] & PAGE_PRESENT)) {
206 // Allocate a new Page Directory
208 if (pfn == PFN_ERROR) return NULL;
209
210 // Link new PD into PDPT
211 PMMPTE pdpte = (PMMPTE)&pdpt_va[pdpt_i];
212 MI_WRITE_PTE(pdpte, pd_from_recursive(pml4_i, pdpt_i), PFN_TO_PHYS(pfn), intermediateFlags);
213 }
214
215 return (PMMPTE)&pdpt_va[pdpt_i];
216}
217
218PMMPTE
220 IN uintptr_t va
221)
222
223{
224 // 1. Calculate Indices
225 size_t pml4_i = get_pml4_index(va);
226 size_t pdpt_i = get_pdpt_index(va);
227 size_t pd_i = get_pd_index(va);
228
229 uint64_t intermediateFlags = PAGE_PRESENT | PAGE_RW;
230
231 // If we are touching user address space, we add user accessibility.
232 if (va <= MmHighestUserAddress) {
233 intermediateFlags |= PAGE_USER;
234 }
235
236 uint64_t* pml4_va = pml4_from_recursive();
237 if (!(pml4_va[pml4_i] & PAGE_PRESENT)) {
238 // Allocate a new PDPT
240 if (pfn == PFN_ERROR) return NULL;
241
242 // We are modifying the recursive mapping of the PML4 entry.
243 PMMPTE pml4e = (PMMPTE)&pml4_va[pml4_i];
244 MI_WRITE_PTE(pml4e, pdpt_from_recursive(pml4_i), PFN_TO_PHYS(pfn), intermediateFlags);
245 }
246
247 uint64_t* pdpt_va = pdpt_from_recursive(pml4_i);
248 if (!(pdpt_va[pdpt_i] & PAGE_PRESENT)) {
249 // Allocate a new Page Directory
251 if (pfn == PFN_ERROR) return NULL;
252
253 // Link new PD into PDPT
254 PMMPTE pdpte = (PMMPTE)&pdpt_va[pdpt_i];
255 MI_WRITE_PTE(pdpte, pd_from_recursive(pml4_i, pdpt_i), PFN_TO_PHYS(pfn), intermediateFlags);
256 }
257
258 uint64_t* pd_va = pd_from_recursive(pml4_i, pdpt_i);
259 if (!(pd_va[pd_i] & PAGE_PRESENT)) {
260 // Allocate a new Page Table
262 if (pfn == PFN_ERROR) return NULL;
263
264 // Link new PT into PD
265 PMMPTE pde = (PMMPTE)&pd_va[pd_i];
266 MI_WRITE_PTE(pde, pt_from_recursive(pml4_i, pdpt_i, pd_i), PFN_TO_PHYS(pfn), intermediateFlags);
267 }
268
269 return (PMMPTE)&pd_va[pd_i];
270}
271
272void
274 IN void* VirtualAddress
275)
276
277/*++
278
279 Routine description:
280
281 Invalidates CPUs TLB for the specified virtual address.
282
283 Arguments:
284
285 [IN] void* VirtualAddress - Virtual address to flush for.
286
287 Return Values:
288
289 None.
290
291 Notes:
292
293 On the SMP Build, if APs are active, an IPI is sent to flush their TLB for the VA as well.
294
295--*/
296
297{
298 invlpg(VirtualAddress);
299#ifndef MT_UP
300 // If SMP is initialized, send IPI.
301 if (smpInitialized) {
302 IPI_PARAMS Param;
303 Param.pageParams.addressToInvalidate = (uint64_t)VirtualAddress;
305 }
306#endif
307}
308
311 IN PMMPTE pte
312)
313
314/*++
315
316 Routine description:
317
318 Translates the PTE given into the appropriate PFN behind its physical address.
319
320 Arguments:
321
322 [IN] pte - Pointer to MMPTE PTE in memory.
323
324 Return Values:
325
326 Page Frame Index.
327
328--*/
329
330{
331 if (!pte) return PFN_ERROR;
332 uintptr_t phys = PTE_TO_PHYSICAL(pte);
333 return PPFN_TO_INDEX(PHYSICAL_TO_PPFN(phys));
334}
335
336uintptr_t
338 IN PMMPTE pte
339)
340
341/*++
342
343 Routine description:
344
345 Translates the PTE given to its appropriate virtual address.
346
347 Arguments:
348
349 [IN] pte - Pointer to MMPTE PTE in memory.
350
351 Return Values:
352
353 Virtual Address associated with the PTE.
354
355 Notes:
356
357 The only reason this works is because the method used to find the indices for the VA (pml4, pdpt, pd, pt, pte)
358 Is reversible, since it is bit shifting.
359
360--*/
361
362{
363 uintptr_t p = (uintptr_t)pte;
364
365 size_t pml4_check = (p >> 39) & 0x1FF;
366 if (pml4_check != RECURSIVE_INDEX) {
367 /* not a recursive PTE pointer */
368 return (uintptr_t)0;
369 }
370
371 size_t pml4_i = (p >> 30) & 0x1FF;
372 size_t pdpt_i = (p >> 21) & 0x1FF;
373 size_t pd_i = (p >> 12) & 0x1FF;
374 size_t pt_i = (p >> 3) & 0x1FF; /* pt entry index */
375
376 uint64_t va = ((uint64_t)pml4_i << 39) |
377 ((uint64_t)pdpt_i << 30) |
378 ((uint64_t)pd_i << 21) |
379 ((uint64_t)pt_i << 12);
380
381 return canonical_high(va); /* page-aligned VA for invlpg */
382}
383
384void
386 IN PMMPTE pte
387)
388
389/*++
390
391 Routine description:
392
393 Unmaps the pte from the current address space.
394
395 Arguments:
396
397 [IN] pte - Pointer to MMPTE PTE in memory.
398
399 Return Values:
400
401 None.
402
403--*/
404
405{
406 if (!pte) return;
407 // First gets its PFN to write to the PMMPTE PresentNotSet union.
409 if (!pfn) return;
410 // Get the PTE's original VA.
411 uintptr_t origVa = MiTranslatePteToVa(pte);
412
413 // Atomically exchange old info with new info to avoid races.
414 MMPTE newPte;
415
416 // Zero out newPte
417 kmemset(&newPte, 0, sizeof(MMPTE));
418
419 // Keep only the protection flags. (so transition function set know which flags it had)
420 newPte.Hard.Write = pte->Hard.Write;
421 newPte.Hard.User = pte->Hard.User;
422 newPte.Hard.NoExecute = pte->Hard.NoExecute;
423
424 // Setting a transition PTE is at another function.
425
426 // Exchange now.
427 InterlockedExchangeU64((volatile uint64_t*)pte, newPte.Value);
428
429 // Invalidate TLBs
430 if (origVa) MiInvalidateTlbForVa((void*)origVa);
431 else MiReloadTLBs();
432
433 // Return.
434 return;
435}
436
437bool
439 IN PMMPTE Pte,
440 IN PAGE_INDEX Pfn
441)
442
443/*++
444
445 Routine description:
446
447 Atomically sets the Pte given to a transition PTE if it did not change while setting.
448
449 Arguments:
450
451 [IN] PMMPTE Pte - Pointer to MMPTE PTE in memory to set as transition.
452 [IN] PAGE_INDEX Pfn - The PFN number to set this PTE as a transition for.
453
454 Return Values:
455
456 None.
457
458 Note:
459
460 This function must always be called ONLY from MiReleasePhysicalPage.
461
462--*/
463
464{
465 // Assertion that the return address is within the bounds of MiReleasePhysicalPage, as this function MUST ONLY be called from there.
466 // Why didnt I make it there? Because I MIGHT plan that this function can be called somewhere else, we'll see.
468
469 // Set the baseline expected.
470 MMPTE Expected = *Pte;
471
472 // Runtime assertions to verify its a valid unmapped PTE before continuing.
473 assert(Expected.Hard.Present == 0);
474 assert(Expected.Soft.Transition == 0);
475
476 char buf[256];
477 ksnprintf(buf, sizeof(buf), "Address of PTE: %p", Pte);
478 assert(Expected.Hard.Prototype == 0, buf);
479
480 // Set the transition page properties.
481 MMPTE Transition = Expected;
482 Transition.Soft.Transition = 1;
483 Transition.Soft.PageFrameNumber = Pfn;
484
485 // Set the protection flags, this is so the page fault handler knows the properties of the pte.
486 Transition.Soft.SoftwareFlags |= (Pte->Hard.Write) ? PROT_KERNEL_WRITE : 0;
487 Transition.Soft.SoftwareFlags |= (Pte->Hard.NoExecute) ? PROT_KERNEL_NOEXECUTE : 0;
488 Transition.Soft.SoftwareFlags |= (Pte->Hard.User) ? PROT_KERNEL_USER : 0;
489
490 // Atomic compare exchange, if the PTE changed within the exchange, we do not set the value, and abort.
491 if (!InterlockedCompareExchangeU64((volatile uint64_t*)Pte, Transition.Value, Expected.Value)) return false;
492
493 // Exchange successful!
494 return true;
495}
496
497// Reloads CR3 to flush all TLBs (slow flush)
498void
500 void
501)
502
503{
505#ifndef MT_UP
506 IPI_PARAMS param;
508#endif
509}
510
511uintptr_t
513 IN void* VirtualAddress
514)
515
516/*++
517
518 Routine description:
519
520 Translates the given virtual address to its equivalent (**IF MAPPED TO**) physical address.
521
522 Arguments:
523
524 [IN] void* VirtualAddress - The mapped virtual address.
525
526 Return Values:
527
528 The physical address mapped to the virtual address, or 0 if invalid.
529
530 The physical address is returned with its equivalent offset (so not page aligned, maybe, modulus the VA given to check.). (e.g VA = 0xff8880 Phys = 0x4880)
531
532--*/
533
534{
535 PMMPTE pte = MiGetPtePointer((uintptr_t)VirtualAddress);
536 if (!pte) return 0;
537
538 if (!pte->Hard.Present) return 0;
539
540 return (uintptr_t)PTE_TO_PHYSICAL(pte) + VA_OFFSET(VirtualAddress);
541}
542
543bool
545 IN uintptr_t VirtualAddress
546)
547
548/*++
549
550 Routine description:
551
552 Checks if the given address is currently present in memory (won't cause a page fault on access)
553
554 Arguments:
555
556 [IN] uintptr_t VirtualAddress - The virtual address.
557
558 Return Values:
559
560 True if the address is valid and in memory, false otherwise.
561
562 Notes:
563
564 This function shouldn't be used in low IRQL situations, as addresses can very well be paged out to disk.
565 (In IRQL equal or higher than DISPATCH_LEVEL, this function is safe, as blocking operations are forbidden, which means memory cannot be paged out)
566
567--*/
568
569{
570 PMMPTE pte = MiGetPtePointer(VirtualAddress);
571 return pte->Hard.Present;
572}
#define IN
Definition annotations.h:8
#define assert(...)
Definition assert.h:57
FORCEINLINE uint64_t InterlockedCompareExchangeU64(volatile uint64_t *target, uint64_t value, uint64_t comparand)
Definition atomic.h:86
FORCEINLINE uint64_t InterlockedExchangeU64(volatile uint64_t *target, uint64_t value)
Definition atomic.h:42
bool smpInitialized
Definition kernel.c:149
int ksnprintf(char *buf, size_t bufsize, const char *fmt,...)
Definition gop.c:482
FORCEINLINE void invlpg(void *m)
Definition intrin.h:205
FORCEINLINE void __write_cr3(uint64_t val)
Definition intrin.h:98
FORCEINLINE uint64_t __read_cr3(void)
Definition intrin.h:93
#define RETADDR(level)
Definition macros.h:53
uintptr_t MiTranslateVirtualToPhysical(IN void *VirtualAddress)
Definition map.c:512
uintptr_t MiTranslatePteToVa(IN PMMPTE pte)
Definition map.c:337
void MiReloadTLBs(void)
Definition map.c:499
PMMPTE MiGetPdptePointer(IN uintptr_t va)
Definition map.c:177
PMMPTE MiGetPdePointer(IN uintptr_t va)
Definition map.c:219
uint64_t * pml4_from_recursive(void)
Definition map.c:31
bool MmIsAddressPresent(IN uintptr_t VirtualAddress)
Definition map.c:544
PMMPTE MiGetPml4ePointer(IN uintptr_t va)
Definition map.c:147
PAGE_INDEX MiTranslatePteToPfn(IN PMMPTE pte)
Definition map.c:310
PMMPTE MiGetPtePointer(IN uintptr_t va)
Definition map.c:76
void MiInvalidateTlbForVa(IN void *VirtualAddress)
Definition map.c:273
void MiUnmapPte(IN PMMPTE pte)
Definition map.c:385
bool MiAtomicSetTransitionPte(IN PMMPTE Pte, IN PAGE_INDEX Pfn)
Definition map.c:438
@ CPU_ACTION_FLUSH_CR3
Definition mh.h:95
@ CPU_ACTION_PERFORM_TLB_SHOOTDOWN
Definition mh.h:91
struct _IPI_PARAMS IPI_PARAMS
@ PAGE_RW
Definition mm.h:311
@ PAGE_USER
Definition mm.h:315
@ PAGE_PRESENT
Definition mm.h:307
#define PFN_TO_PHYS(Pfn)
Definition mm.h:226
@ PfnStateZeroed
Definition mm.h:280
#define PHYSICAL_TO_PPFN(PHYS)
Definition mm.h:64
#define PTE_TO_PHYSICAL(PMMPTE)
Definition mm.h:66
#define PPFN_TO_INDEX(PPFN)
Definition mm.h:135
#define PROT_KERNEL_WRITE
Definition mm.h:233
#define VA_OFFSET(_VirtualAddress)
Definition mm.h:138
#define RECURSIVE_INDEX
Definition mm.h:57
struct _MMPTE * PMMPTE
#define PROT_KERNEL_NOEXECUTE
Definition mm.h:234
uint64_t PAGE_INDEX
Definition mm.h:256
FORCEINLINE void * kmemset(void *dest, int64_t val, uint64_t len)
Definition mm.h:655
#define PROT_KERNEL_USER
Definition mm.h:235
#define PFN_ERROR
Definition mm.h:229
#define MI_WRITE_PTE(_PtePointer, _Va, _Pa, _Flags)
Definition mm.h:90
struct _MMPTE MMPTE
PAGE_INDEX MiRequestPhysicalPage(IN PFN_STATE ListType)
Definition pfn.c:333
bool MiIsWithinBoundsOfReleasePhysicalPage(void *VirtualAddress)
Definition pfn.c:516
uintptr_t MmHighestUserAddress
Definition process.c:32
void MhSendActionToCpusAndWait(CPU_ACTION action, IPI_PARAMS parameter)
Definition smp.c:231
struct _PAGE_PARAMETERS pageParams
Definition mh.h:400
uint64_t Prototype
Definition mm.h:440
struct _MMPTE::@172372265215056352375070220246156106027174106113::@200357034104227323320222006243127050212100105247 Hard
uint64_t Write
Definition mm.h:431
uint64_t PageFrameNumber
Definition mm.h:442
uint64_t Present
Definition mm.h:430
uint64_t Transition
Definition mm.h:455
uint64_t User
Definition mm.h:432
uint64_t Value
Definition mm.h:423
uint64_t NoExecute
Definition mm.h:444
uint64_t SoftwareFlags
Definition mm.h:460
struct _MMPTE::@172372265215056352375070220246156106027174106113::@277354034164206104264133322054061025100052052376 Soft
uint64_t addressToInvalidate
Definition mh.h:395