mm.h
#ifndef X86_MATANEL_MEMORY_H
#define X86_MATANEL_MEMORY_H

/*++

Module Name:

    mm.h

Purpose:

    This module contains the definitions and declarations required for memory management (virtual, physical, PFN, VAD, MMIO, init, etc.)

Author:

    slep (Matanel) 2025.

Revision History:

--*/

// Base Includes
#include <stdint.h>
#include <stdbool.h>
#include "annotations.h"
#include "macros.h"
#include "../mtstatus.h"

// Needed for linked list and spinlocks
#include "ms.h"
#include "core.h"
#include "efi.h"

// ------------------ HEADER SPECIFIC MACROS ------------------

#define PML4_INDEX_BITS 9
#define PML4_INDEX_SHIFT 39
#define PML4_INDEX_MASK ((1ULL << PML4_INDEX_BITS) - 1ULL)

#define PML4_INDEX_FROM_VA(VA) ( ( (uintptr_t)(VA) >> PML4_INDEX_SHIFT ) & PML4_INDEX_MASK )

/* If PhysicalMemoryOffset is the kernel VA base that maps physical 0, the PML4 index
   for physical address PHYS is the index of the VA (PHYS + PhysicalMemoryOffset). */
#define PML4_INDEX_FROM_PHYS(PHYS) PML4_INDEX_FROM_VA( (uintptr_t)(PHYS) + (uintptr_t)PhysicalMemoryOffset )

/* Safer typed helper */
static inline int MiConvertVaToPml4Offset(uint64_t va) {
    return (int)((va >> PML4_INDEX_SHIFT) & PML4_INDEX_MASK);
}

#define VirtualPageSize 4096ULL // Same as each physical frame.
#define PhysicalFrameSize 4096ULL // Each physical frame.
#define KernelVaStart 0xfffff80000000000ULL
#define PhysicalMemoryOffset 0xffff880000000000ULL // Base of the direct map: kernel VA = physical address + this offset.
#define RECURSIVE_INDEX 0x1FF
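
/*
 * Worked example (values follow directly from the definitions above):
 *   PML4_INDEX_FROM_VA(KernelVaStart)        = (0xfffff80000000000 >> 39) & 0x1FF = 0x1F0
 *   PML4_INDEX_FROM_VA(PhysicalMemoryOffset) = (0xffff880000000000 >> 39) & 0x1FF = 0x110
 *   MiConvertVaToPml4Offset(KernelVaStart)   = 0x1F0 as well (typed helper, same math)
 */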

#ifndef __INTELLISENSE__
#ifndef __OFFSET_GENERATOR__
/* Convert a PFN index to a PPFN_ENTRY pointer */
#define INDEX_TO_PPFN(Index) \
    (&(PfnDatabase.PfnEntries[(size_t)(Index)]))
#define PHYSICAL_TO_PPFN(PHYS) \
    (&PfnDatabase.PfnEntries[(size_t)((PHYS) / (uint64_t)PhysicalFrameSize)])
#define PTE_TO_PHYSICAL(PMMPTE) ((PMMPTE)->Value & ~0xFFFULL)
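
/*
 * Example of the PFN-database index math (illustrative):
 *   physical address 0x5000 -> frame 0x5000 / PhysicalFrameSize = 5
 *   PHYSICAL_TO_PPFN(0x5000) == &PfnDatabase.PfnEntries[5]
 *   PPFN_TO_PHYSICAL_ADDRESS(INDEX_TO_PPFN(5)) == 0x5000
 */
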
/* single-CPU build (no IPI shootdown code) */
#ifdef MT_UP

#define MI_WRITE_PTE(_PtePointer, _Va, _Pa, _Flags) \
do { \
    MMPTE* _pte = (MMPTE*)(_PtePointer); \
    uint64_t _val = (((uintptr_t)(_Pa)) & ~0xFFFULL) | (uint64_t)(_Flags); \
    MiAtomicExchangePte(_pte, _val); \
    __asm__ volatile("" ::: "memory"); \
    \
    /* Only set PFN->PTE link if PFN database is initialized */ \
    if (MmPfnDatabaseInitialized) { \
        PPFN_ENTRY _pfn = PHYSICAL_TO_PPFN(_Pa); \
        _pfn->Descriptor.Mapping.PteAddress = (PMMPTE)_pte; \
        _pfn->State = PfnStateActive; \
        _pfn->Flags = PFN_FLAG_NONPAGED; \
    } \
    \
    invlpg((void*)(uintptr_t)(_Va)); \
} while (0)

#else /* MP / SMP build: include TLB shootdown via IPI */

#define MI_WRITE_PTE(_PtePointer, _Va, _Pa, _Flags) \
do { \
    MMPTE* _pte = (MMPTE*)(_PtePointer); \
    uint64_t _val = (((uintptr_t)(_Pa)) & ~0xFFFULL) | (uint64_t)(_Flags); \
    MiAtomicExchangePte(_pte, _val); \
    __asm__ volatile("" ::: "memory"); \
    \
    /* Only set PFN->PTE link if PFN database is initialized */ \
    if (MmPfnDatabaseInitialized) { \
        PPFN_ENTRY _pfn = PHYSICAL_TO_PPFN(_Pa); \
        _pfn->Descriptor.Mapping.PteAddress = (PMMPTE)_pte; \
        _pfn->State = PfnStateActive; \
        _pfn->Flags = PFN_FLAG_NONPAGED; \
    } \
    \
    invlpg((void*)(uintptr_t)(_Va)); \
    \
    /* Send IPIs if SMP is initialized */ \
    if (smpInitialized && allApsInitialized) { \
        IPI_PARAMS _Params; \
        _Params.pageParams.addressToInvalidate = (uint64_t)(_Va); \
        MhSendActionToCpusAndWait(CPU_ACTION_PERFORM_TLB_SHOOTDOWN, _Params); \
    } \
} while (0)

#endif
#define PPFN_TO_INDEX(PPFN) ((size_t)((PPFN) - PfnDatabase.PfnEntries))
#define PPFN_TO_PHYSICAL_ADDRESS(PPFN) \
    ((uint64_t)((uint64_t)PPFN_TO_INDEX(PPFN) * (uint64_t)PhysicalFrameSize))
#define VA_OFFSET(_VirtualAddress) ((uintptr_t)(_VirtualAddress) & 0xFFF)
#define MM_IS_DEMAND_ZERO_PTE(pte) \
    (((pte).Soft.SoftwareFlags & MI_DEMAND_ZERO_BIT) != 0)
#define MM_SET_DEMAND_ZERO_PTE(pte, prot_flags, nx) \
    do { \
        (pte).Value = 0; \
        (pte).Soft.SoftwareFlags = (prot_flags) | MI_DEMAND_ZERO_BIT; \
        (pte).Soft.NoExecute = (nx); \
    } while(0)
#define MM_UNSET_DEMAND_ZERO_PTE(pte) \
    do { \
        (pte).Soft.SoftwareFlags &= ~MI_DEMAND_ZERO_BIT; \
    } while(0)
#endif
#else
#define PTE_TO_PHYSICAL(PMMPTE) (0)
#define MI_WRITE_PTE(_PtePointer, _Va, _Pa, _Flags) ((void)0)
#define PPFN_TO_INDEX(PPFN) (0)
#define PPFN_TO_PHYSICAL_ADDRESS(PPFN) (0)
#define INDEX_TO_PPFN(Index) (NULL)
#define PHYSICAL_TO_PPFN(PHYS) (NULL)
#define VA_OFFSET(_VirtualAddress) (uintptr_t)(NULL)
#define MM_IS_DEMAND_ZERO_PTE(pte) (NULL)
#define MM_SET_DEMAND_ZERO_PTE(pte, prot_flags, nx) ((void)0)
#define MM_UNSET_DEMAND_ZERO_PTE(pte) (NULL)
#endif
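
/*
 * Usage sketch (illustrative only; 'va' and 'pa' are placeholders and the flag mix is
 * one plausible combination, not a requirement of the interface):
 *
 *   PMMPTE pte = MiGetPtePointer(va);
 *   MI_WRITE_PTE(pte, va, pa, PAGE_PRESENT | PAGE_RW | PAGE_NX);   // map one 4 KiB page
 *
 *   MMPTE lazy;
 *   MM_SET_DEMAND_ZERO_PTE(lazy, PROT_KERNEL_READ | PROT_KERNEL_WRITE, 1); // not present yet
 *   // MmAccessFault() can later materialize the page on first touch.
 */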

// Convert bytes to pages (rounding up)
#define BYTES_TO_PAGES(Bytes) (((Bytes) + VirtualPageSize - 1) / VirtualPageSize)
// Convert pages to bytes
#define PAGES_TO_BYTES(Pages) ((Pages) * VirtualPageSize)

#define MAX_POOL_DESCRIPTORS 7 // Allows for: 32, 64, 128, 256, 512, 1024, 2048 bytes per pool
#define _32KB_POOL 1
#define _64KB_POOL 2
#define _128KB_POOL 3
#define _256KB_POOL 4
#define _512KB_POOL 5
#define _1024KB_POOL 6
#define _2048KB_POOL 7
#define POOL_MIN_ALLOC 32
#define USER_VA_END 0x00007FFFFFFFFFFF
#define USER_VA_START 0x10000
// Requests above POOL_MAX_ALLOC are allowed; they fall back to the global pool.
#define POOL_MAX_ALLOC 2048
// Pool sizes
#define MI_NONPAGED_POOL_SIZE ((size_t)16ULL * 1024 * 1024 * 1024) // 16 GiB
#define MI_PAGED_POOL_SIZE ((size_t)32ULL * 1024 * 1024 * 1024) // 32 GiB

// Total pages in each pool
#define NONPAGED_POOL_VA_TOTAL_PAGES (MI_NONPAGED_POOL_SIZE / VirtualPageSize)
#define PAGED_POOL_VA_TOTAL_PAGES (MI_PAGED_POOL_SIZE / VirtualPageSize)

// Bitmap QWORDs
#define NONPAGED_POOL_VA_BITMAP_QWORDS ((NONPAGED_POOL_VA_TOTAL_PAGES + 63) / 64)
#define PAGED_POOL_VA_BITMAP_QWORDS ((PAGED_POOL_VA_TOTAL_PAGES + 63) / 64)

// Number of pages needed for each bitmap (page-aligned)
#define MI_NONPAGED_BITMAP_PAGES_NEEDED ((NONPAGED_POOL_VA_BITMAP_QWORDS * sizeof(uint64_t) + VirtualPageSize - 1) / VirtualPageSize)
#define MI_PAGED_BITMAP_PAGES_NEEDED ((PAGED_POOL_VA_BITMAP_QWORDS * sizeof(uint64_t) + VirtualPageSize - 1) / VirtualPageSize)
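
/*
 * Sizing check (illustrative arithmetic, not configuration):
 *   Non-paged pool: 16 GiB / 4096      = 4,194,304 pages
 *                   4,194,304 / 64     = 65,536 bitmap qwords
 *                   65,536 * 8 bytes   = 512 KiB -> 128 bitmap pages
 *   Paged pool:     32 GiB / 4096      = 8,388,608 pages
 *                   8,388,608 / 64     = 131,072 bitmap qwords
 *                   131,072 * 8 bytes  = 1 MiB   -> 256 bitmap pages
 */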

// Alignment helper
#define ALIGN_UP(x, align) (((uintptr_t)(x) + ((align)-1)) & ~((uintptr_t)((align)-1)))

// Bitmap memory allocations (physical pages)
#define MI_NONPAGED_BITMAP_BASE ALIGN_UP(LK_KERNEL_END, VirtualPageSize)
#define MI_NONPAGED_BITMAP_END (MI_NONPAGED_BITMAP_BASE + MI_NONPAGED_BITMAP_PAGES_NEEDED * VirtualPageSize)

#define MI_PAGED_BITMAP_BASE ALIGN_UP(MI_NONPAGED_BITMAP_END, VirtualPageSize)
#define MI_PAGED_BITMAP_END (MI_PAGED_BITMAP_BASE + MI_PAGED_BITMAP_PAGES_NEEDED * VirtualPageSize)

// Pool virtual address ranges (page-aligned)
#define MI_NONPAGED_POOL_BASE ALIGN_UP(MI_NONPAGED_BITMAP_END, VirtualPageSize)
#define MI_NONPAGED_POOL_END (MI_NONPAGED_POOL_BASE + MI_NONPAGED_POOL_SIZE)

#define MI_PAGED_POOL_BASE ALIGN_UP(MI_NONPAGED_POOL_END, VirtualPageSize)
#define MI_PAGED_POOL_END (MI_PAGED_POOL_BASE + MI_PAGED_POOL_SIZE)

// Address Manipulation And Checks
#define MI_IS_CANONICAL_ADDR(va) \
({ \
    uint64_t _va = (uint64_t)(va); \
    uint64_t _mask = ~((1ULL << 47) - 1); /* bits 63:47 must be all zero or all one */ \
    ((_va & _mask) == 0 || (_va & _mask) == _mask); \
})
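
/*
 * Examples (48-bit virtual addressing, illustrative):
 *   MI_IS_CANONICAL_ADDR(0x00007FFFFFFFFFFF) -> true  (user range, bits 63:47 clear)
 *   MI_IS_CANONICAL_ADDR(0xFFFF880000000000) -> true  (kernel range, bits 63:47 set)
 *   MI_IS_CANONICAL_ADDR(0x0000800000000000) -> false (non-canonical hole)
 */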

#define PFN_TO_PHYS(Pfn) PPFN_TO_PHYSICAL_ADDRESS(INDEX_TO_PPFN(Pfn))
#define PHYS_TO_INDEX(PhysicalAddress) PPFN_TO_INDEX(PHYSICAL_TO_PPFN(PhysicalAddress))

#define PFN_ERROR UINT64_T_MAX

// Lazy allocation macros
#define PROT_KERNEL_READ 0x1
#define PROT_KERNEL_WRITE 0x2
#define MI_DEMAND_ZERO_BIT (1ULL << 16)

// Tags
#define MM_POOL_CANARY 'BEKA'

// Stack sizes & protections.
#define MI_STACK_SIZE 0x4000 // 16 KiB
#define MI_LARGE_STACK_SIZE 0xf000 // 60 KiB
#define MI_GUARD_PAGE_PROTECTION (1ULL << 17)

// Barriers

// Prevents CPU reordering in addition to the MmBarrier functionality.
#define MmFullBarrier() __sync_synchronize()

// Ensures ordering of memory operations (stores must be globally visible before continuing).
#define MmBarrier() __asm__ __volatile__("mfence" ::: "memory")

// ------------------ TYPE DEFINES ------------------
typedef uint64_t PAGE_INDEX;

#define MmIsAddressValid(VirtualAddress) MmIsAddressPresent(VirtualAddress)

// ------------------ ENUMERATORS ------------------

typedef enum _PFN_STATE {
    PfnStateActive,     // Actively mapped in a process (RefCount > 0)
    PfnStateStandby,    // Clean, in RAM, not mapped (RefCount == 0)
    PfnStateModified,   // Dirty, in RAM, not mapped (RefCount == 0)
    PfnStateFree,       // Contents are garbage (RefCount == 0)
    PfnStateZeroed,     // Contents are all zeros (RefCount == 0)
    PfnStateTransition, // Locked for I/O (e.g. being paged in/out)
    PfnStateBad         // Unusable (hardware error)
} PFN_STATE;

// Page FLAGS (attributes that can be combined)
typedef enum _PFN_FLAGS {
    PFN_FLAG_NONE = 0,
    PFN_FLAG_NONPAGED = (1U << 0),      // This PFN holds a nonpaged virtual address (not backed by a file); BIT 3 must NOT be set if this bit is active.
    PFN_FLAG_COPY_ON_WRITE = (1U << 1), // This is a COW page
    PFN_FLAG_MAPPED_FILE = (1U << 2),   // Backed by a file (not swap)
    PFN_FLAG_LOCKED_FOR_IO = (1U << 3)  // Page is pinned for DMA, etc.
} PFN_FLAGS;

typedef enum _VAD_FLAGS {
    VAD_FLAG_NONE = 0,
    VAD_FLAG_READ = (1U << 0),
    VAD_FLAG_WRITE = (1U << 1),
    VAD_FLAG_EXECUTE = (1U << 2),
    VAD_FLAG_PRIVATE = (1U << 3),       // Private (backed by swap file, like pagefile.mtsys)
    VAD_FLAG_MAPPED_FILE = (1U << 4),   // Backed by a file (let's say data.mtdll)
    VAD_FLAG_COPY_ON_WRITE = (1U << 5)  // Copy-on-write mapping
} VAD_FLAGS;

typedef enum _PAGE_FLAGS {
    PAGE_PRESENT = 1 << 0,  // Bit 0
    // 0 = page not present (access causes page fault)
    // 1 = page is present, MMU translates virtual addresses

    PAGE_RW = 1 << 1,       // Bit 1
    // 0 = read-only
    // 1 = read/write

    PAGE_USER = 1 << 2,     // Bit 2
    // 0 = supervisor (kernel) only
    // 1 = user-mode access allowed

    PAGE_PWT = 0x8,         // Bit 3
    // Page Write-Through
    // 0 = write-back caching
    // 1 = write-through caching

    PAGE_PCD = 0x10,        // Bit 4
    // Page Cache Disable
    // 0 = cacheable
    // 1 = cache disabled

    PAGE_ACCESSED = 0x20,   // Bit 5
    // Set by CPU when page is read or written

    PAGE_DIRTY = 0x40,      // Bit 6
    // Set by CPU when page is written to

    PAGE_PS = 0x80,         // Bit 7
    // Page Size
    // 0 = normal 4 KiB page
    // 1 = large page (2 MiB in a PDE, 1 GiB in a PDPTE in long mode)

#define PAGE_PAT (1ULL << 7)
    // PAGE_PAT (bit 7 of a 4 KiB PTE); see the MEMORY_CACHING_TYPE enum.

    PAGE_GLOBAL = 0x100,    // Bit 8
    // Global page
    // Not flushed from TLB on CR3 reload

    PAGE_NX = (1ULL << 63)  // Bit 63
    // No-Execute region
    // Execution cannot happen in this page.
} PAGE_FLAGS;

// NonPagedPool - Allocations may occur at up to and including DISPATCH_LEVEL (i.e. assert(IRQL <= DISPATCH_LEVEL)).
// PagedPool    - Allocations may occur only below DISPATCH_LEVEL (i.e. assert(IRQL < DISPATCH_LEVEL)).
typedef enum _POOL_TYPE {
    NonPagedPool = 0,             // Non-pageable kernel pool (instant map, available at all IRQLs)
    PagedPool = 1,                // Pageable pool (can only be used when IRQL < DISPATCH_LEVEL).
    NonPagedPoolCacheAligned = 2, // Non-paged, cache-aligned (UNIMPLEMENTED)
    PagedPoolCacheAligned = 3,    // Paged, cache-aligned (UNIMPLEMENTED)
    NonPagedPoolNx = 4,           // Non-paged, non-executable (NX) (UNIMPLEMENTED)
    PagedPoolNx = 5,              // Paged, non-executable (UNIMPLEMENTED)
    // No MustSucceed variants; these are a bad concept, handle errors gracefully.
} POOL_TYPE;

typedef enum _FAULT_OPERATION {
    FaultOpInvalid,
    ReadOperation,
    WriteOperation,
    ExecuteOperation
} FAULT_OPERATION, *PFAULT_OPERATION;

typedef enum _PRIVILEGE_MODE {
    KernelMode,
    UserMode
} PRIVILEGE_MODE, *PPRIVILEGE_MODE;

typedef enum _MEMORY_CACHING_TYPE {

    MmNonCached = 0,          // UC (Uncacheable)
    // CPU never caches reads/writes.
    // Every access goes directly to RAM or device.
    // Most MMIO devices require this.

    MmCached,                 // WB (Write-Back) (default)
    // Normal DRAM caching behavior.
    // Reads/writes go through CPU caches; writes may be delayed.
    // Fastest and default for regular memory.

    MmWriteCombined,          // WC (Write-Combining)
    // Writes are buffered and combined, NOT cached.
    // Ideal for framebuffers / GPUs.
    // Fast sequential writes; the CPU collects them and bursts to memory.

    MmWriteThrough,           // WT (Write-Through)
    // Reads are cached, but writes go straight to memory.
    // Ensures memory is always coherent but slower for writes.
    // Rarely used today.

    MmNonCachedUnordered,     // UC- (Uncacheable Minus)
    // Similar to UC but allows some reordering and speculative reads.
    // Safe for some device memory but not all.
    // Used mostly by OSes for special mappings.

    MmUSWCCached,             // USWC (Uncached Speculative Write Combining)
    // Read = UC-, Write = WC.
    // Used for some high-end GPU/PCIe devices.
    // Allows speculative reads + write-combined writes.

    MmHardwareCoherentCached, // WB or WT depending on device
    // For coherent DMA-capable devices.
    // Typically WB unless the device explicitly requires WT.
} MEMORY_CACHING_TYPE;

typedef enum _SYSTEM_PHASE_ROUTINE {
    SYSTEM_PHASE_INITIALIZE_ALL,
    SYSTEM_PHASE_INITIALIZE_PAT_ONLY
} SYSTEM_PHASE_ROUTINE;

// ------------------ STRUCTURES ------------------

typedef struct _MMPTE
{
    union
    {
        uint64_t Value; // Raw 64-bit PTE value

        //
        // Hardware format when the page is present in memory
        //
        struct
        {
            uint64_t Present : 1;          // 1 = Present
            uint64_t Write : 1;            // Writable
            uint64_t User : 1;             // User-accessible
            uint64_t WriteThrough : 1;     // Write-through cache
            uint64_t CacheDisable : 1;     // Disable caching
            uint64_t Accessed : 1;         // Set by CPU when accessed
            uint64_t Dirty : 1;            // Set by CPU when written
            uint64_t LargePage : 1;        // Large page flag (2MB/1GB) (valid in PDE/PDPTE, not PTE)
            uint64_t Global : 1;           // Global TLB entry
            uint64_t CopyOnWrite : 1;      // Software: copy-on-write
            uint64_t Prototype : 1;        // Software: prototype PTE (section)
            uint64_t Reserved0 : 1;        // Unused or software-available
            uint64_t PageFrameNumber : 40; // Physical page frame number
            uint64_t Reserved1 : 11;       // Reserved by hardware
            uint64_t NoExecute : 1;        // NX bit
        } Hard;

        //
        // Software format when not present
        // (Paged out / transition / pagefile / prototype)
        //
        struct
        {
            uint64_t Present : 1;          // 0 = Not present
            uint64_t Write : 1;            // Meaning depends on context
            uint64_t Transition : 1;       // 1 = Page is in transition (has PFN) (used for the standby list)
            uint64_t Prototype : 1;        // 1 = Prototype PTE (mapped section)
            uint64_t PageFile : 1;         // 1 = Paged to disk (pagefile)
            uint64_t Reserved : 6;         // i'm sorry, h.c (6 bits so the Soft layout totals exactly 64 bits)
            uint64_t PageFrameNumber : 32; // Pagefile offset or PFN (if transition)
            uint64_t SoftwareFlags : 20;   // e.g. protection mask, pool type
            uint64_t NoExecute : 1;        // NX still meaningful in software
        } Soft;
    };
} MMPTE, *PMMPTE;

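/*
 * Illustrative decode (not exhaustive): a raw PTE value of 0x0000000000200063
 *   Hard.Present = 1, Hard.Write = 1      (0x63 = Present | Write | Accessed | Dirty)
 *   Hard.Accessed = 1, Hard.Dirty = 1
 *   Hard.PageFrameNumber = 0x200          (physical frame 0x200, i.e. physical 0x200000)
 *   PTE_TO_PHYSICAL(&pte) yields 0x200000 for this value.
 */
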
typedef struct _PFN_ENTRY {
    volatile uint32_t RefCount; // Atomic Reference Count
    uint8_t State;              // PFN_STATE of this Page.
    uint8_t Flags;              // Bitfield of PFN_FLAGS
    // The Descriptor of the PFN (contains mapping data, the doubly linked list, and file offset, all of which depend on the State)
    union {
        // State: PfnStateFree, PfnStateZeroed,
        // PfnStateStandby, PfnStateModified (Used when - INACTIVE)
        struct _DOUBLY_LINKED_LIST ListEntry;

        // State: PfnStateActive (this is the reverse mapping information) (Used when - ACTIVE, IN USE)
        struct {
            struct _MMVAD* Vad; // Pointer to VAD in memory. (might not always be in use)
            PMMPTE PteAddress;  // Pointer to PTE in memory. (is always valid when in use)
        } Mapping;

        // State: PfnStateStandby or PfnStateModified (for file backed pages) (Used when - SEMI-ACTIVE, PAGED TO DISK, NOT IN CURRENT USE)
        uint64_t FileOffset;    // Offset of 4KiB pages in pagefile.mtsys

    } Descriptor;
} PFN_ENTRY, *PPFN_ENTRY;

typedef struct _MM_PFN_LIST {
    struct _DOUBLY_LINKED_LIST ListEntry; // List Head
    volatile uint64_t Count;              // Number of pages in this list.
    SPINLOCK PfnListLock;                 // Spinlock for each PFN List to ensure atomicity.
} MM_PFN_LIST;

typedef struct _MM_PFN_DATABASE {
    PPFN_ENTRY PfnEntries;    // Pointer to base of the PFN_ENTRY array.
    size_t TotalPageCount;    // Total count of pages in the PFN database.
    SPINLOCK PfnDatabaseLock; // Global spinlock for adding/popping memory.

    // Page lists
    MM_PFN_LIST FreePageList;     // Pages with garbage data.
    MM_PFN_LIST ZeroedPageList;   // Pages pre-filled with zeros for optimization purposes.
    MM_PFN_LIST StandbyPageList;  // Clean pages, candidates for reuse. (used for loading processes fast)
    MM_PFN_LIST ModifiedPageList; // Dirty pages, must be written to disk for backing.
    MM_PFN_LIST BadPageList;      // List of bad memory pages

    // Statistics
    volatile size_t AvailablePages; // Free + Zeroed + Standby
    volatile size_t TotalReserved;  // Kernel, drivers, etc.
} MM_PFN_DATABASE;

typedef struct _MMVAD {
    uintptr_t StartVa; // Starting Virtual Address.
    uintptr_t EndVa;   // Ending Virtual Address.
    VAD_FLAGS Flags;   // VAD_FLAGS Bitfield

    // VADs are per-process, stored in an AVL tree.
    struct _MMVAD* LeftChild;
    struct _MMVAD* RightChild;
    struct _MMVAD* Parent;

    // Height of the node in the tree.
    int Height;

    // If the VAD_FLAG_MAPPED_FILE bit is set.
    struct _FILE_OBJECT* File; // Pointer to file object.
    uint64_t FileOffset;       // Offset into the file this region starts in.

    // Pointer to owner process.
    struct _EPROCESS* OwningProcess;
} MMVAD, *PMMVAD;

typedef struct _POOL_HEADER
{
    uint32_t PoolCanary; // Must always be equal to - 'BEKA'
    union
    {
        // When the block is FREE, it's part of a list.
        SINGLE_LINKED_LIST FreeListEntry;

        // When the block is ALLOCATED, we store actual metadata info.
        struct
        {
            uint16_t BlockSize; // Size of this block
            uint16_t PoolIndex; // Index of the slab it came from
        };
    } Metadata;
    uint32_t PoolTag; // Tag of pool. (default - 'ADIR')
} POOL_HEADER, *PPOOL_HEADER;

typedef struct _POOL_DESCRIPTOR {
    SINGLE_LINKED_LIST FreeListHead; // Head of the free list
    size_t BlockSize;                // Size of the block + header (for a 32-byte slab this is 32 + sizeof(POOL_HEADER))
    volatile uint64_t FreeCount;     // Number of blocks on the free list
    volatile uint64_t TotalBlocks;   // Total blocks ever allocated (statistics)
    SPINLOCK PoolLock;               // Spinlock for this specific pool descriptor.
} POOL_DESCRIPTOR, *PPOOL_DESCRIPTOR;

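/*
 * Layout example (illustrative; the exact placement is an implementation detail of pool.c):
 * a small allocation from a 32-byte slab is described by a POOL_HEADER carrying the canary,
 * the block size and the slab index, followed by the 32 bytes handed back to the caller,
 * which matches POOL_DESCRIPTOR.BlockSize = 32 + sizeof(POOL_HEADER) for that descriptor.
 */
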
// ------------------ FUNCTIONS ------------------

extern MM_PFN_DATABASE PfnDatabase; // Database defined in 'pfn.c'
// Global declarations for signals & constants.
extern bool MmPfnDatabaseInitialized;
extern PAGE_INDEX MmHighestPfn;
extern uintptr_t MmSystemRangeStart;
extern uintptr_t MmHighestUserAddress;
extern uintptr_t MmUserProbeAddress;
extern uintptr_t MmNonPagedPoolStart;
extern uintptr_t MmNonPagedPoolEnd;
extern uintptr_t MmPagedPoolStart;
extern uintptr_t MmPagedPoolEnd;

// general functions
uint64_t* pml4_from_recursive(void);

// Memory Set.
FORCEINLINE
void*
kmemset(
    void* dest, int64_t val, uint64_t len
)
{
    uint8_t* ptr = dest;
    for (size_t i = 0; i < (size_t)len; i++) {
        ptr[i] = (uint8_t)val;
    }
    return dest;
}

// Memory copy
FORCEINLINE
void*
kmemcpy(
    void* dest, const void* src, size_t len
)
{
    uint8_t* d = (uint8_t*)dest;
    const uint8_t* s = (const uint8_t*)src;
    for (size_t i = 0; i < len; i++) d[i] = s[i];
    return dest;
}

FORCEINLINE
int
kmemcmp(
    const void* s1, const void* s2, size_t n
)
{
    const uint8_t* p1 = (const uint8_t*)s1;
    const uint8_t* p2 = (const uint8_t*)s2;

    for (size_t i = 0; i < n; i++) {
        if (p1[i] != p2[i])
            return (int)(p1[i] - p2[i]);
    }
    return 0;
}

void
MiReloadTLBs(
    void
);

FORCEINLINE
uint64_t
MiCacheToFlags(MEMORY_CACHING_TYPE type)
{
    switch (type)
    {
        case MmCached: // WB
            return 0;

        case MmWriteThrough: // WT
            return PAGE_PWT;

        case MmNonCached: // UC
            return PAGE_PCD | PAGE_PWT;

        case MmWriteCombined: // WC
            return PAGE_PAT; // (Index 5)

        case MmNonCachedUnordered: // UC-
            return PAGE_PAT | PAGE_PCD; // (Index 6)

        case MmUSWCCached: // USWC (UC- reads + WC writes)
            return PAGE_PAT | PAGE_PWT; // (Index 7 = UC, but write behavior is WC)

        case MmHardwareCoherentCached: // Usually WB; fallback WT if required
            return 0;

        default:
            return 0;
    }
}
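
/*
 * Illustrative use (assumes a PTE obtained from MiGetPtePointer and a framebuffer mapping;
 * 'FrameBufferVa' and 'FrameBufferPa' are placeholder names, not part of this interface):
 *
 *   uint64_t flags = PAGE_PRESENT | PAGE_RW | PAGE_NX | MiCacheToFlags(MmWriteCombined);
 *   MI_WRITE_PTE(MiGetPtePointer(FrameBufferVa), FrameBufferVa, FrameBufferPa, flags);
 */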

FORCEINLINE
FAULT_OPERATION
MiRetrieveOperationFromErrorCode(
    uint64_t ErrorCode
)

{
    FAULT_OPERATION operation;

    if (ErrorCode & (1 << 4)) {
        operation = ExecuteOperation; // Execute (NX fault): the CPU attempted to fetch an instruction from a page marked NX.
    }
    else if (ErrorCode & (1 << 1)) {
        operation = WriteOperation; // Write fault (read-only page / not present)
    }
    else {
        operation = ReadOperation; // Read fault (not present?)
    }

    return operation;
}
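
/*
 * Examples (x86-64 #PF error-code bits: bit 0 = present, bit 1 = write, bit 4 = instruction fetch):
 *   ErrorCode 0x02 -> WriteOperation   (write to a non-present page)
 *   ErrorCode 0x07 -> WriteOperation   (user-mode write to a present, read-only page)
 *   ErrorCode 0x10 -> ExecuteOperation (instruction fetch from an NX page)
 */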

FORCEINLINE
uint64_t
MiRetrieveLastFaultyAddress(
    void
)

{
    return __read_cr2();
}

FORCEINLINE
void
MiAtomicExchangePte(
    PMMPTE PtePtr,
    uint64_t NewPteValue
)

{
    InterlockedExchangeU64((volatile uint64_t*)PtePtr, NewPteValue);
}

void
MiInvalidateTlbForVa(
    IN void* VirtualAddress
);

FORCEINLINE
bool
MiIsValidPfn(
    IN PAGE_INDEX Pfn
)

{
    return Pfn <= MmHighestPfn;
}

// module: pfn.c

MTSTATUS
MiInitializePfnDatabase(
    IN PBOOT_INFO BootInfo
);

PAGE_INDEX
MiRequestPhysicalPage(
    IN PFN_STATE ListType
);

void
MiReleasePhysicalPage(
    IN PAGE_INDEX PfnIndex
);

void
MiUnlinkPageFromList(
    PPFN_ENTRY pfn
);

// module: map.c

PMMPTE
MiGetPtePointer(
    IN uintptr_t va
);

PMMPTE
MiGetPdePointer(
    IN uintptr_t va
);

PMMPTE
MiGetPdptePointer(
    IN uintptr_t va
);

PMMPTE
MiGetPml4ePointer(
    IN uintptr_t va
);

uint64_t
MiTranslatePteToVa(
    IN PMMPTE pte
);

PAGE_INDEX
MiTranslatePteToPfn(
    IN PMMPTE pte
);

uintptr_t
MiTranslateVirtualToPhysical(
    IN void* VirtualAddress
);

void
MiUnmapPte(
    IN PMMPTE pte
);

bool
MmIsAddressPresent(
    IN uintptr_t VirtualAddress
);

// module: hypermap.c

void*
MiMapPageInHyperspace(
    IN uint64_t PfnIndex,
    OUT PIRQL OldIrql
);

void
MiUnmapHyperSpaceMap(
    IN IRQL OldIrql
);

// module: pool.c

MTSTATUS
MiInitializePoolSystem(
    void
);

// Only NonPagedPool and PagedPool are implemented out of the POOL_TYPE enumerator.
void*
MmAllocatePoolWithTag(
    IN enum _POOL_TYPE PoolType,
    IN size_t NumberOfBytes,
    IN uint32_t Tag
);

void
MmFreePool(
    IN void* buf
);
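
/*
 * Usage sketch (illustrative; the WIDGET type and 'GDIW' tag are placeholders):
 *
 *   typedef struct _WIDGET { uint64_t Id; } WIDGET;
 *
 *   WIDGET* w = MmAllocatePoolWithTag(NonPagedPool, sizeof(WIDGET), 'GDIW');
 *   if (w != NULL) {
 *       kmemset(w, 0, sizeof(WIDGET));
 *       // ... use w at IRQL <= DISPATCH_LEVEL (NonPagedPool) ...
 *       MmFreePool(w);
 *   }
 */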

// module: mmproc.c

void*
MiCreateKernelStack(
    IN bool LargeStack
);

void
MiFreeKernelStack(
    IN void* AllocatedStackTop,
    IN bool LargeStack
);

MTSTATUS
MmCreateProcessAddressSpace(
    OUT void** DirectoryTable
);

// module: vad.c

MTSTATUS
MmAllocateVirtualMemory(
    IN PEPROCESS Process,
    _In_Opt _Out_Opt void** BaseAddress,
    IN size_t NumberOfBytes,
    IN VAD_FLAGS VadFlags
);

// TODO Free with explicit size, split vad if needed.
MTSTATUS
MmFreeVirtualMemory(
    IN PEPROCESS Process,
    IN void* BaseAddress
);

PMMVAD
MiFindVad(
    IN PMMVAD Root,
    IN uintptr_t VirtualAddress
);

void
MiMoveUefiDataToHigherHalf(
    IN PBOOT_INFO BootInfo
);

uintptr_t
MmFindFreeAddressSpace(
    IN PEPROCESS Process,
    IN size_t NumberOfBytes,
    IN uintptr_t SearchStart,
    IN uintptr_t SearchEnd // exclusive
);
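
/*
 * Usage sketch (illustrative; 'proc' is assumed to be a valid PEPROCESS, and the
 * success check assumes non-negative MTSTATUS values mean success - see mtstatus.h):
 *
 *   void* base = NULL; // NULL = let MmAllocateVirtualMemory pick a free range
 *   MTSTATUS status = MmAllocateVirtualMemory(proc, &base, 2 * VirtualPageSize,
 *                                             VAD_FLAG_READ | VAD_FLAG_WRITE | VAD_FLAG_PRIVATE);
 *   if (status >= 0) {
 *       // ... demand-zero pages can be materialized on first touch via MmAccessFault ...
 *       MmFreeVirtualMemory(proc, base);
 *   }
 */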

// module: va.c

bool
MiInitializePoolVaSpace(
    void
);

uintptr_t
MiAllocatePoolVa(
    IN POOL_TYPE PoolType,
    IN size_t NumberOfBytes
);

void
MiFreePoolVaContiguous(
    IN uintptr_t va,
    IN size_t NumberOfBytes,
    IN POOL_TYPE PoolType
);

// module: fault.c

MTSTATUS
MmAccessFault(
    IN uint64_t FaultBits,
    IN uint64_t VirtualAddress,
    IN PRIVILEGE_MODE PreviousMode,
    IN PTRAP_FRAME TrapFrame
);

bool
MmInvalidAccessAllowed(
    void
);

// module: mmio.c

bool
MiCheckForContigiousMemory(
    IN void* StartAddress,
    IN size_t NumberOfBytes
);

void*
MmAllocateContigiousMemory(
    IN size_t NumberOfBytes,
    IN uint64_t HighestAcceptableAddress
);

void
MmFreeContigiousMemory(
    IN void* BaseAddress,
    IN size_t NumberOfBytes
);

void*
MmMapIoSpace(
    IN uintptr_t PhysicalAddress,
    IN size_t NumberOfBytes,
    IN MEMORY_CACHING_TYPE CacheType
);
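
/*
 * Usage sketch (illustrative; the physical address and size are placeholders for a device BAR):
 *
 *   void* regs = MmMapIoSpace(0xFED00000, VirtualPageSize, MmNonCached);
 *   if (regs != NULL) {
 *       volatile uint32_t* mmio = (volatile uint32_t*)regs;
 *       uint32_t id = mmio[0]; // uncached device register read
 *       (void)id;
 *   }
 */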

// module: mminit.c

bool
MmInitSystem(
    IN uint8_t Phase,
    IN PBOOT_INFO BootInformation
);

// module: oom.c

// TODO: OOM killer, used when no physical memory is available and even paging to disk has failed.

#endif