map.c
/*++

Module Name:

    map.c

Purpose:

    This translation unit contains the implementation of the internal mapping functions for kernel use.

Author:

    slep (Matanel) 2025.

Revision History:

--*/

#include "../../includes/mm.h"
#include "../../includes/mh.h"
#include "../../assert.h"

static inline uint64_t canonical_high(uint64_t addr) {
    // If bit 47 is set, set all higher bits
    if (addr & (1ULL << 47)) {
        return addr | 0xFFFF000000000000ULL;
    }
    return addr;
}
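
/*
    For reference, a minimal equivalent sketch (not used by this file): making a
    48-bit address canonical is just sign extension of bit 47, so the branch
    above can also be written as an arithmetic shift. Relies on arithmetic right
    shift of signed values, which holds for the usual x86-64 compilers.

        static inline uint64_t canonical_high_branchless(uint64_t addr) {
            // Shift bit 47 up to bit 63, then arithmetic-shift back down,
            // replicating it across bits 48-63.
            return (uint64_t)(((int64_t)(addr << 16)) >> 16);
        }
*/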

uint64_t* pml4_from_recursive(void) {
    uint64_t va = ((uint64_t)RECURSIVE_INDEX << 39) |
                  ((uint64_t)RECURSIVE_INDEX << 30) |
                  ((uint64_t)RECURSIVE_INDEX << 21) |
                  ((uint64_t)RECURSIVE_INDEX << 12);
    va = canonical_high(va);
    return (uint64_t*)(uintptr_t)va;
}

// To get the PDPT page for pml4_i
static inline uint64_t* pdpt_from_recursive(size_t pml4_i) {
    uint64_t va = ((uint64_t)RECURSIVE_INDEX << 39) |
                  ((uint64_t)RECURSIVE_INDEX << 30) |
                  ((uint64_t)RECURSIVE_INDEX << 21) |
                  ((uint64_t)pml4_i << 12);
    va = canonical_high(va);
    return (uint64_t*)(uintptr_t)va;
}

// To get the PD page for pml4_i, pdpt_i
static inline uint64_t* pd_from_recursive(size_t pml4_i, size_t pdpt_i) {
    uint64_t va = ((uint64_t)RECURSIVE_INDEX << 39) |
                  ((uint64_t)RECURSIVE_INDEX << 30) |
                  ((uint64_t)pml4_i << 21) |
                  ((uint64_t)pdpt_i << 12);
    va = canonical_high(va);
    return (uint64_t*)(uintptr_t)va;
}

// To get the PT page for pml4_i, pdpt_i, pd_i
static inline uint64_t* pt_from_recursive(size_t pml4_i, size_t pdpt_i, size_t pd_i) {
    uint64_t va = ((uint64_t)RECURSIVE_INDEX << 39) |
                  ((uint64_t)pml4_i << 30) |
                  ((uint64_t)pdpt_i << 21) |
                  ((uint64_t)pd_i << 12);
    va = canonical_high(va);
    return (uint64_t*)(uintptr_t)va;
}

// Extract the 9-bit paging indices from a virtual address.
static inline size_t get_pml4_index(uint64_t va) { return (va >> 39) & 0x1FF; }
static inline size_t get_pdpt_index(uint64_t va) { return (va >> 30) & 0x1FF; }
static inline size_t get_pd_index(uint64_t va)   { return (va >> 21) & 0x1FF; }
static inline size_t get_pt_index(uint64_t va)   { return (va >> 12) & 0x1FF; }
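
/*
    Worked example of the recursive-mapping arithmetic (a sketch; the address is
    arbitrary). For va = 0x40201000 the indices are pml4_i = 0, pdpt_i = 1,
    pd_i = 1, pt_i = 1, so its raw PTE is entry 1 of the page table mapped back
    in by pt_from_recursive(0, 1, 1):

        uint64_t va = 0x40201000ULL;
        uint64_t* pt = pt_from_recursive(get_pml4_index(va),
                                         get_pdpt_index(va),
                                         get_pd_index(va));
        uint64_t pte_value = pt[get_pt_index(va)];   // raw PTE for 0x40201000
*/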

PMMPTE
MiGetPtePointer(
    IN uintptr_t va
)

/*++

    Routine description:

        Retrieves a pointer to the PTE for the given virtual address, allocating
        any missing intermediate paging structures along the way.

    Arguments:

        [IN] uintptr_t va - Virtual address.

    Return Values:

        Pointer to the PTE associated with the virtual address (NULL if out of memory).

--*/

{
    // 1. Calculate indices.
    size_t pml4_i = get_pml4_index(va);
    size_t pdpt_i = get_pdpt_index(va);
    size_t pd_i = get_pd_index(va);
    size_t pt_i = get_pt_index(va);

    uint64_t intermediateFlags = PAGE_PRESENT | PAGE_RW;

    // If we are touching user address space, add user accessibility.
    if (va <= MmHighestUserAddress) {
        intermediateFlags |= PAGE_USER;
    }

    uint64_t* pml4_va = pml4_from_recursive();
    if (!(pml4_va[pml4_i] & PAGE_PRESENT)) {
        // Allocate a new PDPT.
        PAGE_INDEX pfn = MiRequestPhysicalPage(PfnStateZeroed);
        if (pfn == PFN_ERROR) return NULL;

        // We are modifying the recursive mapping of the PML4 entry.
        PMMPTE pml4e = (PMMPTE)&pml4_va[pml4_i];
        MI_WRITE_PTE(pml4e, pdpt_from_recursive(pml4_i), PFN_TO_PHYS(pfn), intermediateFlags);
    }

    uint64_t* pdpt_va = pdpt_from_recursive(pml4_i);
    if (!(pdpt_va[pdpt_i] & PAGE_PRESENT)) {
        // Allocate a new Page Directory.
        PAGE_INDEX pfn = MiRequestPhysicalPage(PfnStateZeroed);
        if (pfn == PFN_ERROR) return NULL;

        // Link the new PD into the PDPT.
        PMMPTE pdpte = (PMMPTE)&pdpt_va[pdpt_i];
        MI_WRITE_PTE(pdpte, pd_from_recursive(pml4_i, pdpt_i), PFN_TO_PHYS(pfn), intermediateFlags);
    }

    uint64_t* pd_va = pd_from_recursive(pml4_i, pdpt_i);
    if (!(pd_va[pd_i] & PAGE_PRESENT)) {
        // Allocate a new Page Table.
        PAGE_INDEX pfn = MiRequestPhysicalPage(PfnStateZeroed);
        if (pfn == PFN_ERROR) return NULL;

        // Link the new PT into the PD.
        PMMPTE pde = (PMMPTE)&pd_va[pd_i];
        MI_WRITE_PTE(pde, pt_from_recursive(pml4_i, pdpt_i, pd_i), PFN_TO_PHYS(pfn), intermediateFlags);
    }

    // Return the address of the PTE.
    uint64_t* pt_va = pt_from_recursive(pml4_i, pdpt_i, pd_i);
    return (PMMPTE)&pt_va[pt_i];
}
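
/*
    Usage sketch (illustrative only; the MI_WRITE_PTE argument order follows its
    use above, and the address is a hypothetical kernel VA): mapping a freshly
    allocated, zeroed physical page.

        uintptr_t va = 0xFFFFFF0000000000ULL;            // hypothetical kernel VA
        PAGE_INDEX pfn = MiRequestPhysicalPage(PfnStateZeroed);
        PMMPTE pte = MiGetPtePointer(va);
        if (pte && pfn != PFN_ERROR) {
            MI_WRITE_PTE(pte, (void*)va, PFN_TO_PHYS(pfn), PAGE_PRESENT | PAGE_RW);
        }
*/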

PMMPTE
MiGetPml4ePointer(
    IN uintptr_t va
)

{
    // 1. Calculate indices.
    size_t pml4_i = get_pml4_index(va);

    uint64_t intermediateFlags = PAGE_PRESENT | PAGE_RW;

    // If we are touching user address space, add user accessibility.
    if (va <= MmHighestUserAddress) {
        intermediateFlags |= PAGE_USER;
    }

    uint64_t* pml4_va = pml4_from_recursive();
    if (!(pml4_va[pml4_i] & PAGE_PRESENT)) {
        // Allocate a new PDPT.
        PAGE_INDEX pfn = MiRequestPhysicalPage(PfnStateZeroed);
        if (pfn == PFN_ERROR) return NULL;

        // We are modifying the recursive mapping of the PML4 entry.
        PMMPTE pml4e = (PMMPTE)&pml4_va[pml4_i];
        MI_WRITE_PTE(pml4e, pdpt_from_recursive(pml4_i), PFN_TO_PHYS(pfn), intermediateFlags);
    }

    return (PMMPTE)&pml4_va[pml4_i];
}

PMMPTE
MiGetPdptePointer(
    IN uintptr_t va
)

{
    // 1. Calculate indices.
    size_t pml4_i = get_pml4_index(va);
    size_t pdpt_i = get_pdpt_index(va);

    uint64_t intermediateFlags = PAGE_PRESENT | PAGE_RW;

    // If we are touching user address space, add user accessibility.
    if (va <= MmHighestUserAddress) {
        intermediateFlags |= PAGE_USER;
    }

    uint64_t* pml4_va = pml4_from_recursive();
    if (!(pml4_va[pml4_i] & PAGE_PRESENT)) {
        // Allocate a new PDPT.
        PAGE_INDEX pfn = MiRequestPhysicalPage(PfnStateZeroed);
        if (pfn == PFN_ERROR) return NULL;

        // We are modifying the recursive mapping of the PML4 entry.
        PMMPTE pml4e = (PMMPTE)&pml4_va[pml4_i];
        MI_WRITE_PTE(pml4e, pdpt_from_recursive(pml4_i), PFN_TO_PHYS(pfn), intermediateFlags);
    }

    uint64_t* pdpt_va = pdpt_from_recursive(pml4_i);
    if (!(pdpt_va[pdpt_i] & PAGE_PRESENT)) {
        // Allocate a new Page Directory.
        PAGE_INDEX pfn = MiRequestPhysicalPage(PfnStateZeroed);
        if (pfn == PFN_ERROR) return NULL;

        // Link the new PD into the PDPT.
        PMMPTE pdpte = (PMMPTE)&pdpt_va[pdpt_i];
        MI_WRITE_PTE(pdpte, pd_from_recursive(pml4_i, pdpt_i), PFN_TO_PHYS(pfn), intermediateFlags);
    }

    return (PMMPTE)&pdpt_va[pdpt_i];
}

PMMPTE
MiGetPdePointer(
    IN uintptr_t va
)

{
    // 1. Calculate indices.
    size_t pml4_i = get_pml4_index(va);
    size_t pdpt_i = get_pdpt_index(va);
    size_t pd_i = get_pd_index(va);

    uint64_t intermediateFlags = PAGE_PRESENT | PAGE_RW;

    // If we are touching user address space, add user accessibility.
    if (va <= MmHighestUserAddress) {
        intermediateFlags |= PAGE_USER;
    }

    uint64_t* pml4_va = pml4_from_recursive();
    if (!(pml4_va[pml4_i] & PAGE_PRESENT)) {
        // Allocate a new PDPT.
        PAGE_INDEX pfn = MiRequestPhysicalPage(PfnStateZeroed);
        if (pfn == PFN_ERROR) return NULL;

        // We are modifying the recursive mapping of the PML4 entry.
        PMMPTE pml4e = (PMMPTE)&pml4_va[pml4_i];
        MI_WRITE_PTE(pml4e, pdpt_from_recursive(pml4_i), PFN_TO_PHYS(pfn), intermediateFlags);
    }

    uint64_t* pdpt_va = pdpt_from_recursive(pml4_i);
    if (!(pdpt_va[pdpt_i] & PAGE_PRESENT)) {
        // Allocate a new Page Directory.
        PAGE_INDEX pfn = MiRequestPhysicalPage(PfnStateZeroed);
        if (pfn == PFN_ERROR) return NULL;

        // Link the new PD into the PDPT.
        PMMPTE pdpte = (PMMPTE)&pdpt_va[pdpt_i];
        MI_WRITE_PTE(pdpte, pd_from_recursive(pml4_i, pdpt_i), PFN_TO_PHYS(pfn), intermediateFlags);
    }

    uint64_t* pd_va = pd_from_recursive(pml4_i, pdpt_i);
    if (!(pd_va[pd_i] & PAGE_PRESENT)) {
        // Allocate a new Page Table.
        PAGE_INDEX pfn = MiRequestPhysicalPage(PfnStateZeroed);
        if (pfn == PFN_ERROR) return NULL;

        // Link the new PT into the PD.
        PMMPTE pde = (PMMPTE)&pd_va[pd_i];
        MI_WRITE_PTE(pde, pt_from_recursive(pml4_i, pdpt_i, pd_i), PFN_TO_PHYS(pfn), intermediateFlags);
    }

    return (PMMPTE)&pd_va[pd_i];
}

void
MiInvalidateTlbForVa(
    IN void* VirtualAddress
)

/*++

    Routine description:

        Invalidates the CPU's TLB entry for the specified virtual address.

    Arguments:

        [IN] void* VirtualAddress - Virtual address to flush.

    Return Values:

        None.

    Notes:

        On SMP builds, if the APs are active, an IPI is sent so that they flush their TLBs for the VA as well.

--*/

{
    invlpg(VirtualAddress);
    // If SMP is initialized, send an IPI.
#ifndef MT_UP
    if (smpInitialized) {
        IPI_PARAMS Param;
        Param.pageParams.addressToInvalidate = (uint64_t)VirtualAddress;
        MhSendActionToCpusAndWait(CPU_ACTION_PERFORM_TLB_SHOOTDOWN, Param);
    }
#endif
}

PAGE_INDEX
MiTranslatePteToPfn(
    IN PMMPTE pte
)

/*++

    Routine description:

        Translates the given PTE into the page frame number (PFN) of the physical page it maps.

    Arguments:

        [IN] pte - Pointer to an MMPTE in memory.

    Return Values:

        Page frame index (PFN_ERROR if pte is NULL).

--*/

{
    if (!pte) return PFN_ERROR;
    uintptr_t phys = PTE_TO_PHYSICAL(pte);
    return PPFN_TO_INDEX(PHYSICAL_TO_PPFN(phys));
}

uint64_t
MiTranslatePteToVa(
    IN PMMPTE pte
)

/*++

    Routine description:

        Translates the given PTE pointer back to the virtual address it maps.

    Arguments:

        [IN] pte - Pointer to an MMPTE in memory.

    Return Values:

        Virtual address associated with the PTE, or 0 if the pointer is not a recursive PTE pointer.

    Notes:

        This works only because a recursive PTE pointer is built by bit-shifting
        the VA's indices (pml4, pdpt, pd, pt) into place, which is reversible.

--*/

{
    uintptr_t p = (uintptr_t)pte;

    size_t pml4_check = (p >> 39) & 0x1FF;
    if (pml4_check != RECURSIVE_INDEX) {
        /* Not a recursive PTE pointer. */
        return (uintptr_t)0;
    }

    size_t pml4_i = (p >> 30) & 0x1FF;
    size_t pdpt_i = (p >> 21) & 0x1FF;
    size_t pd_i = (p >> 12) & 0x1FF;
    size_t pt_i = (p >> 3) & 0x1FF; /* PT entry index. */

    uint64_t va = ((uint64_t)pml4_i << 39) |
                  ((uint64_t)pdpt_i << 30) |
                  ((uint64_t)pd_i << 21) |
                  ((uint64_t)pt_i << 12);

    return canonical_high(va); /* Page-aligned VA for invlpg. */
}
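
/*
    Round-trip sketch (illustrative): a PTE pointer encodes the VA's indices one
    level down, so translating there and back recovers the page base.

        uint64_t va = 0x40201000ULL;                   // arbitrary example address
        PMMPTE pte = MiGetPtePointer(va);
        // If the mapping structures exist, the page-aligned VA is recovered:
        //     MiTranslatePteToVa(pte) == (va & ~0xFFFULL)
*/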

void
MiUnmapPte(
    IN PMMPTE pte
)

/*++

    Routine description:

        Unmaps the PTE from the current address space.

    Arguments:

        [IN] pte - Pointer to an MMPTE in memory.

    Return Values:

        None.

    Notes:

        This function DOES NOT release the PFN associated with the PTE back to the database; you must do so yourself.

--*/

{
    if (!pte) return;
    // First get its PFN so it can be written into the PMMPTE's not-present (Soft) union.
    PAGE_INDEX pfn = MiTranslatePteToPfn(pte);
    if (!pfn) return;
    // Get the PTE's original VA.
    uint64_t origVa = MiTranslatePteToVa(pte);

    // Atomically exchange the old contents with the new to avoid races.
    MMPTE newPte;

    // Zero out newPte.
    kmemset(&newPte, 0, sizeof(MMPTE));

    // Write the new values.
    newPte.Soft.PageFrameNumber = pfn;

    // The transition bit is deliberately not set here: the PFN is kept only to track the
    // last good frame. Marking the page as transition should be done by a dedicated
    // function when the page is placed on the standby list. TODO

    // Exchange now.
    InterlockedExchangeU64((volatile uint64_t*)pte, newPte.Value);

    // Invalidate TLBs.
    if (origVa) MiInvalidateTlbForVa((void*)origVa);
    else MiReloadTLBs();

    // Return.
    return;
}
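
/*
    Usage sketch for a full unmap. The release step is the caller's job (see the
    Notes above); the release routine named here is hypothetical, since this file
    only shows the allocation side (MiRequestPhysicalPage).

        PMMPTE pte = MiGetPtePointer(va);
        if (pte && pte->Hard.Present) {
            PAGE_INDEX pfn = MiTranslatePteToPfn(pte);
            MiUnmapPte(pte);
            // MiReleasePhysicalPage(pfn);   // hypothetical: return frame to the PFN database
        }
*/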

// Reloads CR3 to flush all TLBs (slow flush).
void
MiReloadTLBs(
    void
)

{
    __write_cr3(__read_cr3());
#ifndef MT_UP
    IPI_PARAMS param;
    MhSendActionToCpusAndWait(CPU_ACTION_FLUSH_CR3, param);
#endif
}

uintptr_t
MiTranslateVirtualToPhysical(
    IN void* VirtualAddress
)

/*++

    Routine description:

        Translates the given virtual address to the physical address it is mapped to (**IF MAPPED**).

    Arguments:

        [IN] void* VirtualAddress - The mapped virtual address.

    Return Values:

        The physical address mapped to the virtual address, or 0 if invalid.

        The returned physical address carries the page offset of the given VA, so it is not
        necessarily page aligned (e.g. VA = 0xff8880 -> Phys = 0x4880).

--*/

{
    PMMPTE pte = MiGetPtePointer((uintptr_t)VirtualAddress);
    if (!pte) return 0;

    if (!pte->Hard.Present) return 0;

    return (uintptr_t)PTE_TO_PHYSICAL(pte) + VA_OFFSET(VirtualAddress);
}
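
/*
    Worked offset example (numbers from the comment above): if VA 0xff8880 is
    mapped to the frame at physical 0x4000, then VA_OFFSET(0xff8880) = 0x880 and
    the routine returns 0x4000 + 0x880 = 0x4880.

        uintptr_t phys = MiTranslateVirtualToPhysical((void*)0xff8880);
        // phys == 0x4880 while mapped as above; 0 if the page is not present.
*/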

bool
MmIsAddressPresent(
    IN uintptr_t VirtualAddress
)

/*++

    Routine description:

        Checks whether the given address is currently present in memory (accessing it won't cause a page fault).

    Arguments:

        [IN] uintptr_t VirtualAddress - The virtual address.

    Return Values:

        True if the address is valid and in memory, false otherwise.

--*/

{
    PMMPTE pte = MiGetPtePointer(VirtualAddress);
    if (!pte) return false;
    return pte->Hard.Present;
}