1#ifndef X86_MATANEL_MEMORY_H
2#define X86_MATANEL_MEMORY_H
/* PML4 indexing: bits 47:39 of a canonical VA select one of 512 slots. */
#define PML4_INDEX_BITS 9
#define PML4_INDEX_SHIFT 39
#define PML4_INDEX_MASK ((1ULL << PML4_INDEX_BITS) - 1ULL)
/* Extract the PML4 slot index (0..511) from a virtual address. */
#define PML4_INDEX_FROM_VA(VA) ( ( (uintptr_t)(VA) >> PML4_INDEX_SHIFT ) & PML4_INDEX_MASK )
/* PML4 slot of a physical address as seen through the direct-map window
 * (PhysicalMemoryOffset is defined just below in this header). */
#define PML4_INDEX_FROM_PHYS(PHYS) PML4_INDEX_FROM_VA( (uintptr_t)(PHYS) + (uintptr_t)PhysicalMemoryOffset )
49static inline int MiConvertVaToPml4Offset(uint64_t va) {
/* Fundamental paging geometry: 4 KiB pages/frames (x86-64 long mode). */
#define VirtualPageSize 4096ULL
#define PhysicalFrameSize 4096ULL
/* Base of the kernel image region in the higher half. */
#define KernelVaStart 0xfffff80000000000ULL
/* Base of the direct physical-memory map: phys X is visible at offset + X. */
#define PhysicalMemoryOffset 0xffff880000000000ULL
/* PML4 slot reserved for the recursive page-table mapping (last slot, 511). */
#define RECURSIVE_INDEX 0x1FF
59#ifndef __INTELLISENSE__
60#ifndef __OFFSET_GENERATOR__
/* Map a PFN-database index to its PFN_ENTRY slot. */
#define INDEX_TO_PPFN(Index) \
    (&(PfnDatabase.PfnEntries[(size_t)(Index)]))
/* Map a physical address to the PFN_ENTRY describing its 4 KiB frame. */
#define PHYSICAL_TO_PPFN(PHYS) \
    (&PfnDatabase.PfnEntries[(size_t)((PHYS) / (uint64_t)PhysicalFrameSize)])
/* Physical frame address held in a hardware PTE: bits 51:12 only.
 * NOTE(review): previously masked with ~0xFFFULL, which leaks the NX bit
 * (63) and the available bits 62:52 into the returned address whenever
 * they are set; the architectural address field is bits 51:12. */
#define PTE_TO_PHYSICAL(PMMPTE) ((PMMPTE)->Value & 0x000FFFFFFFFFF000ULL)
70#define MI_WRITE_PTE(_PtePointer, _Va, _Pa, _Flags) \
72 MMPTE* _pte = (MMPTE*)(_PtePointer); \
73 uint64_t _val = (((uintptr_t)(_Pa)) & ~0xFFFULL) | (uint64_t)(_Flags); \
74 MiAtomicExchangePte(_pte, _val); \
75 __asm__ volatile("" ::: "memory"); \
78 if (MmPfnDatabaseInitialized) { \
79 PPFN_ENTRY _pfn = PHYSICAL_TO_PPFN(_Pa); \
80 _pfn->Descriptor.Mapping.PteAddress = (PMMPTE)_pte; \
81 _pfn->State = PfnStateActive; \
82 _pfn->Flags = PFN_FLAG_NONPAGED; \
85 invlpg((void*)(uintptr_t)(_Va)); \
90#define MI_WRITE_PTE(_PtePointer, _Va, _Pa, _Flags) \
92 MMPTE* _pte = (MMPTE*)(_PtePointer); \
93 uint64_t _val = (((uintptr_t)(_Pa)) & ~0xFFFULL) | (uint64_t)(_Flags); \
94 MiAtomicExchangePte(_pte, _val); \
95 __asm__ volatile("" ::: "memory"); \
98 if (MmPfnDatabaseInitialized) { \
99 PPFN_ENTRY _pfn = PHYSICAL_TO_PPFN(_Pa); \
100 _pfn->Descriptor.Mapping.PteAddress = (PMMPTE)_pte; \
101 _pfn->State = PfnStateActive; \
102 _pfn->Flags = PFN_FLAG_NONPAGED; \
105 invlpg((void*)(uintptr_t)(_Va)); \
108 if (smpInitialized && allApsInitialized) { \
109 IPI_PARAMS _Params; \
110 _Params.pageParams.addressToInvalidate = (uint64_t)(_Va); \
111 MhSendActionToCpusAndWait(CPU_ACTION_PERFORM_TLB_SHOOTDOWN, _Params);\
115#define MI_WRITE_PTE_NO_IPI(_PtePointer, _Va, _Pa, _Flags) \
117 MMPTE* _pte = (MMPTE*)(_PtePointer); \
118 uint64_t _val = (((uintptr_t)(_Pa)) & ~0xFFFULL) | (uint64_t)(_Flags); \
119 MiAtomicExchangePte(_pte, _val); \
120 __asm__ volatile("" ::: "memory"); \
123 if (MmPfnDatabaseInitialized) { \
124 PPFN_ENTRY _pfn = PHYSICAL_TO_PPFN(_Pa); \
125 _pfn->Descriptor.Mapping.PteAddress = (PMMPTE)_pte; \
126 _pfn->State = PfnStateActive; \
127 _pfn->Flags = PFN_FLAG_NONPAGED; \
130 invlpg((void*)(uintptr_t)(_Va)); \
/* Index of a PFN_ENTRY within the PFN database (pointer difference). */
#define PPFN_TO_INDEX(PPFN) ((size_t)((PPFN) - PfnDatabase.PfnEntries))
/* Physical address of the frame a PFN_ENTRY describes. */
#define PPFN_TO_PHYSICAL_ADDRESS(PPFN) \
    ((uint64_t)((uint64_t)PPFN_TO_INDEX(PPFN) * (uint64_t)PhysicalFrameSize))
/* Byte offset of an address within its 4 KiB page. */
#define VA_OFFSET(_VirtualAddress) ((uintptr_t)(_VirtualAddress) & 0xFFF)
/* A software PTE is demand-zero when MI_DEMAND_ZERO_BIT (defined below)
 * is set in its SoftwareFlags field. */
#define MM_IS_DEMAND_ZERO_PTE(pte) \
    (((pte).Soft.SoftwareFlags & MI_DEMAND_ZERO_BIT) != 0)
141#define MM_SET_DEMAND_ZERO_PTE(pte, prot_flags, nx) \
144 (pte).Soft.SoftwareFlags = (prot_flags) | MI_DEMAND_ZERO_BIT; \
145 (pte).Soft.NoExecute = (nx); \
147#define MM_UNSET_DEMAND_ZERO_PTE(pte) \
149 (pte).Soft.SoftwareFlags &= ~MI_DEMAND_ZERO_BIT; \
/* IntelliSense / offset-generator stubs: keep this header parsable without
 * pulling in PfnDatabase or the hardware PTE layout. Values are
 * meaningless placeholders, never used at runtime. */
#define PTE_TO_PHYSICAL(PMMPTE) (0)
#define MI_WRITE_PTE(_PtePointer, _Va, _Pa, _Flags) ((void)0)
#define MI_WRITE_PTE_NO_IPI(_PtePointer, _Va, _Pa, _Flags) ((void)0)
#define PPFN_TO_INDEX(PPFN) (0)
#define PPFN_TO_PHYSICAL_ADDRESS(PPFN) (0)
#define INDEX_TO_PPFN(Index) (NULL)
#define PHYSICAL_TO_PPFN(PHYS) (NULL)
#define VA_OFFSET(_VirtualAddress) (uintptr_t)(NULL)
#define MM_IS_DEMAND_ZERO_PTE(pte) (NULL)
#define MM_SET_DEMAND_ZERO_PTE(pte, prot_flags, nx) ((void)0)
#define MM_UNSET_DEMAND_ZERO_PTE(pte) (NULL)
/* Round a byte count up to whole 4 KiB pages. */
#define BYTES_TO_PAGES(Bytes) (((Bytes) + VirtualPageSize - 1) / VirtualPageSize)
/* Convert a page count to bytes. */
#define PAGES_TO_BYTES(Pages) ((Pages) * VirtualPageSize)
/* Round an address down to its containing page base. */
#define PAGE_ALIGN(Va) ((void*)((uint64_t)(Va) & ~(VirtualPageSize - 1)))
/* Pool configuration. */
#define MAX_POOL_DESCRIPTORS 7
/* Smallest and largest block sizes served by the small-block pool lists. */
#define POOL_MIN_ALLOC 32
#define POOL_MAX_ALLOC 2048
/* Virtual address budget per pool: 16 GiB non-paged, 32 GiB paged. */
#define MI_NONPAGED_POOL_SIZE ((size_t)16ULL * 1024 * 1024 * 1024)
#define MI_PAGED_POOL_SIZE ((size_t)32ULL * 1024 * 1024 * 1024)
/* Total pages each pool's VA space covers. */
#define NONPAGED_POOL_VA_TOTAL_PAGES (MI_NONPAGED_POOL_SIZE / VirtualPageSize)
#define PAGED_POOL_VA_TOTAL_PAGES (MI_PAGED_POOL_SIZE / VirtualPageSize)
/* One bitmap bit per page, packed into 64-bit words (round up). */
#define NONPAGED_POOL_VA_BITMAP_QWORDS ((NONPAGED_POOL_VA_TOTAL_PAGES + 63) / 64)
#define PAGED_POOL_VA_BITMAP_QWORDS ((PAGED_POOL_VA_TOTAL_PAGES + 63) / 64)
/* Pages needed to back each bitmap itself (round up to whole pages). */
#define MI_NONPAGED_BITMAP_PAGES_NEEDED ((NONPAGED_POOL_VA_BITMAP_QWORDS * sizeof(uint64_t) + VirtualPageSize - 1) / VirtualPageSize)
#define MI_PAGED_BITMAP_PAGES_NEEDED ((PAGED_POOL_VA_BITMAP_QWORDS * sizeof(uint64_t) + VirtualPageSize - 1) / VirtualPageSize)
/* Round x up to the next multiple of align (align must be a power of two). */
#define ALIGN_UP(x, align) (((uintptr_t)(x) + ((align)-1)) & ~((uintptr_t)((align)-1)))
/* Kernel VA layout, carved out page-aligned immediately after the kernel
 * image (LK_KERNEL_END): non-paged bitmap, paged bitmap, non-paged pool,
 * then paged pool — each region starting where the previous one ends. */
#define MI_NONPAGED_BITMAP_BASE ALIGN_UP(LK_KERNEL_END, VirtualPageSize)
#define MI_NONPAGED_BITMAP_END (MI_NONPAGED_BITMAP_BASE + MI_NONPAGED_BITMAP_PAGES_NEEDED * VirtualPageSize)
#define MI_PAGED_BITMAP_BASE ALIGN_UP(MI_NONPAGED_BITMAP_END, VirtualPageSize)
#define MI_PAGED_BITMAP_END (MI_PAGED_BITMAP_BASE + MI_PAGED_BITMAP_PAGES_NEEDED * VirtualPageSize)
#define MI_NONPAGED_POOL_BASE ALIGN_UP(MI_PAGED_BITMAP_END, VirtualPageSize)
#define MI_NONPAGED_POOL_END (MI_NONPAGED_POOL_BASE + MI_NONPAGED_POOL_SIZE)
#define MI_PAGED_POOL_BASE ALIGN_UP(MI_NONPAGED_POOL_END, VirtualPageSize)
#define MI_PAGED_POOL_END (MI_PAGED_POOL_BASE + MI_PAGED_POOL_SIZE)
219#define MI_IS_CANONICAL_ADDR(va) \
221 uint64_t _va = (uint64_t)(va); \
222 uint64_t _mask = ~((1ULL << 48) - 1); \
223 ((_va & _mask) == 0 || (_va & _mask) == _mask); \
/* Frame index <-> physical address, composed from the PFN accessors above. */
#define PFN_TO_PHYS(Pfn) PPFN_TO_PHYSICAL_ADDRESS(INDEX_TO_PPFN(Pfn))
#define PHYS_TO_INDEX(PhysicalAddress) PPFN_TO_INDEX(PHYSICAL_TO_PPFN(PhysicalAddress))
/* Sentinel meaning "no frame". NOTE(review): was spelled UINT64_T_MAX,
 * which is not a standard <stdint.h> macro; the standard name is
 * UINT64_MAX (same value if the project defined the old name itself). */
#define PFN_ERROR UINT64_MAX
/* Software-level kernel page-protection request bits (translated to
 * hardware PTE flags elsewhere). */
#define PROT_KERNEL_READ (1ULL << 0)
#define PROT_KERNEL_WRITE (1ULL << 1)
#define PROT_KERNEL_NOEXECUTE (1ULL << 2)
#define PROT_KERNEL_USER (1ULL << 3)
/* Software-PTE flag marking a demand-zero (allocate-on-first-fault) page. */
#define MI_DEMAND_ZERO_BIT (1ULL << 16)
/* Pool-header canary (multi-character constant; its value is
 * implementation-defined but stable for a given compiler). */
#define MM_POOL_CANARY 'BEKA'
/* Kernel stack sizes, guard-page marker, and default user stack size. */
#define MI_STACK_SIZE 0x4000
#define MI_LARGE_STACK_SIZE 0xf000
#define MI_GUARD_PAGE_PROTECTION (1ULL << 17)
#define MI_DEFAULT_USER_STACK_SIZE 0x100000
/* Full memory barrier via the GCC/Clang builtin. */
#define MmFullBarrier() __sync_synchronize()
/* Explicit mfence plus a compiler barrier ("memory" clobber). */
#define MmBarrier() __asm__ __volatile__("mfence" ::: "memory")
/* Address validity is defined as presence in the page tables. */
#define MmIsAddressValid(VirtualAddress) MmIsAddressPresent(VirtualAddress)
/* Section object access-right bits. */
#define MT_SECTION_QUERY 0x0001
#define MT_SECTION_MAP_WRITE 0x0002
#define MT_SECTION_MAP_READ 0x0004
#define MT_SECTION_MAP_EXECUTE 0x0008
#define MT_SECTION_EXTEND_SIZE 0x0010
#define MT_SECTION_MAP_EXECUTE_EXPL 0x0020
/* Union of every right above. */
#define MT_SECTION_ALL_ACCESS 0x003F
/* PTE bit 7 — the PAT selector bit in a 4 KiB page-table entry (together
 * with PCD/PWT it selects the PAT entry controlling the cache type).
 * NOTE(review): in PDE/PDPTE entries bit 7 is PS, not PAT — confirm this
 * macro is only applied to leaf 4 KiB PTEs. */
#define PAGE_PAT (1ULL << 7)
467_Static_assert(
sizeof(
MMPTE) == 8,
"The size of a PTE in a 64bit system is always 8 bytes");
/* ELF relocation type for x86-64: R_X86_64_RELATIVE. */
#define R_X86_64_RELATIVE 8
/* User-mode virtual address range (lower canonical half, starting above
 * the first 64 KiB).
 * NOTE(review): unlike the kernel constants above, these carry no ULL
 * suffix; confirm call sites never compare them against signed types
 * before normalizing the suffixes. */
#define USER_VA_END 0x00007FFFFFFFFFFF
#define USER_VA_START 0x10000
656 void* dest, int64_t val, uint64_t len
660 for (
size_t i = 0; i < (size_t)len; i++) {
661 ptr[i] = (uint8_t)val;
670 void* dest,
const void* src,
size_t len
673 uint8_t* d = (uint8_t*)dest;
674 const uint8_t* s = (
const uint8_t*)src;
675 for (
size_t i = 0; i < len; i++) d[i] = s[i];
682 const void* s1,
const void* s2,
size_t n
685 const uint8_t* p1 = (
const uint8_t*)s1;
686 const uint8_t* p2 = (
const uint8_t*)s2;
688 for (
size_t i = 0; i < n; i++) {
690 return (
int)(p1[i] - p2[i]);
737 if (ErrorCode & (1 << 4)) {
740 else if (ErrorCode & (1 << 1)) {
819 IN void* VirtualAddress
865 IN void* VirtualAddress
875 IN uintptr_t VirtualAddress
883 IN uint64_t PfnIndex,
905 IN size_t NumberOfBytes,
924 IN void* AllocatedStackTop,
930 OUT void** DirectoryTable
936 IN uintptr_t PageDirectoryPhysical
942 OUT void** OutStackTop,
943 _In_Opt size_t StackReserveSize
950 OUT void** OutBasicMtdllTypes
965 IN size_t NumberOfBytes,
980 IN uintptr_t VirtualAddress
987 IN size_t NumberOfBytes,
988 IN uintptr_t SearchStart,
989 IN uintptr_t SearchEnd
1011 IN size_t NumberOfBytes
1017 IN size_t NumberOfBytes,
1025 IN uint64_t FaultBits,
1026 IN uint64_t VirtualAddress,
1041 IN void* StartAddress,
1042 IN size_t NumberOfBytes
1048 IN size_t NumberOfBytes,
1049 IN uint64_t HighestAcceptableAddress
1054 IN void* BaseAddress,
1055 IN size_t NumberOfBytes
1061 IN uintptr_t PhysicalAddress,
1062 IN size_t NumberOfBytes,
1068 IN void* VirtualAddress,
1069 IN size_t NumberOfBytes
1102 OUT void** EntryPointAddress,
1103 OUT void** BaseAddress
#define VALIDATE_SIZE(struc, size)
FORCEINLINE uint64_t InterlockedExchangeU64(volatile uint64_t *target, uint64_t value)
struct _SINGLE_LINKED_LIST SINGLE_LINKED_LIST
struct _BOOT_INFO * PBOOT_INFO
FORCEINLINE unsigned long __read_cr2(void)
MUST_USE_RESULT void * MiCreateKernelStack(IN bool LargeStack)
void MmFreeContigiousMemory(IN void *BaseAddress, IN size_t NumberOfBytes)
enum _SYSTEM_PHASE_ROUTINE SYSTEM_PHASE_ROUTINE
struct _MM_PFN_LIST MM_PFN_LIST
MTSTATUS MmAccessFault(IN uint64_t FaultBits, IN uint64_t VirtualAddress, IN PRIVILEGE_MODE PreviousMode, IN PTRAP_FRAME TrapFrame)
FORCEINLINE void MiAtomicExchangePte(PMMPTE PtePtr, uint64_t NewPteValue)
bool MiCheckForContigiousMemory(IN void *StartAddress, IN size_t NumberOfBytes)
MUST_USE_RESULT bool MmInvalidAccessAllowed(void)
FORCEINLINE uint64_t MiRetrieveLastFaultyAddress(void)
uintptr_t MiTranslateVirtualToPhysical(IN void *VirtualAddress)
enum _PFN_STATE PFN_STATE
uintptr_t MiTranslatePteToVa(IN PMMPTE pte)
void MiUnlinkPageFromList(PPFN_ENTRY pfn)
struct _MM_SUBSECTION * PMM_SUBSECTION
MUST_USE_RESULT uintptr_t MiAllocatePoolVa(IN POOL_TYPE PoolType, IN size_t NumberOfBytes)
@ SYSTEM_PHASE_INITIALIZE_PAT_ONLY
@ SYSTEM_PHASE_INITIALIZE_ALL
@ NonPagedPoolCacheAligned
FORCEINLINE int kmemcmp(const void *s1, const void *s2, size_t n)
FORCEINLINE bool MiIsValidPfn(IN PAGE_INDEX Pfn)
enum _MEMORY_CACHING_TYPE MEMORY_CACHING_TYPE
MUST_USE_RESULT void * MmMapIoSpace(IN uintptr_t PhysicalAddress, IN size_t NumberOfBytes, IN MEMORY_CACHING_TYPE CacheType)
MTSTATUS MmMapViewOfSection(IN HANDLE SectionHandle, IN PEPROCESS Process, OUT void **EntryPointAddress, OUT void **BaseAddress)
MUST_USE_RESULT MTSTATUS MmIsAddressRangeFree(PEPROCESS Process, uintptr_t StartVa, uintptr_t EndVa)
struct _MM_SECTION * PMM_SECTION
void MmpDeleteSection(void *Object)
@ MmHardwareCoherentCached
enum _FAULT_OPERATION * PFAULT_OPERATION
enum _PRIVILEGE_MODE * PPRIVILEGE_MODE
enum _POOL_TYPE POOL_TYPE
enum _FAULT_OPERATION FAULT_OPERATION
void MmUnmapIoSpace(IN void *VirtualAddress, IN size_t NumberOfBytes)
void MiReleasePhysicalPage(IN PAGE_INDEX PfnIndex)
struct _MM_SUBSECTION MM_SUBSECTION
struct _POOL_DESCRIPTOR POOL_DESCRIPTOR
MUST_USE_RESULT void * MiMapPageInHyperspace(IN uint64_t PfnIndex, OUT PIRQL OldIrql)
FORCEINLINE FAULT_OPERATION MiRetrieveOperationFromErrorCode(uint64_t ErrorCode)
FORCEINLINE void * kmemcpy(void *dest, const void *src, size_t len)
struct _MM_PFN_DATABASE MM_PFN_DATABASE
void MiFreePoolVaContiguous(IN uintptr_t va, IN size_t NumberOfBytes, IN POOL_TYPE PoolType)
FORCEINLINE uint64_t MiCacheToFlags(MEMORY_CACHING_TYPE type)
struct _PFN_ENTRY PFN_ENTRY
MTSTATUS MmCreateProcessAddressSpace(OUT void **DirectoryTable)
MTSTATUS MiInitializePoolSystem(void)
enum _PRIVILEGE_MODE PRIVILEGE_MODE
bool MiIsWithinBoundsOfReleasePhysicalPage(void *VirtualAddress)
MUST_USE_RESULT void * MmAllocateContigiousMemory(IN size_t NumberOfBytes, IN uint64_t HighestAcceptableAddress)
FORCEINLINE void * kmemset(void *dest, int64_t val, uint64_t len)
struct _POOL_DESCRIPTOR * PPOOL_DESCRIPTOR
PMMPTE MiGetPdptePointer(IN uintptr_t va)
enum _VAD_FLAGS VAD_FLAGS
struct _MM_SECTION MM_SECTION
MTSTATUS MmFreeVirtualMemory(IN PEPROCESS Process, IN void *BaseAddress)
MTSTATUS MmCreateUserStack(IN PEPROCESS Process, OUT void **OutStackTop, _In_Opt size_t StackReserveSize)
MTSTATUS MmDeleteProcessAddressSpace(IN PEPROCESS Process, IN uintptr_t PageDirectoryPhysical)
MUST_USE_RESULT HOT PAGE_INDEX MiRequestPhysicalPage(IN PFN_STATE ListType)
void MiMoveUefiDataToHigherHalf(IN PBOOT_INFO BootInfo)
MTSTATUS MiInitializePfnDatabase(IN PBOOT_INFO BootInfo)
PMMPTE MiGetPdePointer(IN uintptr_t va)
uint64_t * pml4_from_recursive(void)
MTSTATUS MmAllocateVirtualMemory(IN PEPROCESS Process, _In_Opt _Out_Opt void **BaseAddress, IN size_t NumberOfBytes, IN VAD_FLAGS VadFlags)
MUST_USE_RESULT PMMVAD MiFindVad(IN PEPROCESS Process, IN uintptr_t VirtualAddress)
bool MmIsAddressPresent(IN uintptr_t VirtualAddress)
enum _PAGE_FLAGS PAGE_FLAGS
PMMPTE MiGetPml4ePointer(IN uintptr_t va)
bool MmInitSystem(IN uint8_t Phase, IN PBOOT_INFO BootInformation)
PAGE_INDEX MiTranslatePteToPfn(IN PMMPTE pte)
MTSTATUS MmCreateTeb(IN PETHREAD Thread, OUT void **OutTeb)
struct _POOL_HEADER POOL_HEADER
void MmFreePool(IN void *buf)
void MiFreeKernelStack(IN void *AllocatedStackTop, IN bool LargeStack)
void MiUnmapHyperSpaceMap(IN IRQL OldIrql)
MUST_USE_RESULT uintptr_t MmFindFreeAddressSpace(IN PEPROCESS Process, IN size_t NumberOfBytes, IN uintptr_t SearchStart, IN uintptr_t SearchEnd)
struct _PFN_ENTRY * PPFN_ENTRY
bool MiInitializePoolVaSpace(void)
PMMPTE MiGetPtePointer(IN uintptr_t va)
MUST_USE_RESULT HOT void * MmAllocatePoolWithTag(IN enum _POOL_TYPE PoolType, IN size_t NumberOfBytes, IN uint32_t Tag)
void MiInvalidateTlbForVa(IN void *VirtualAddress)
void MiUnmapPte(IN PMMPTE pte)
bool MiAtomicSetTransitionPte(IN PMMPTE Pte, IN PAGE_INDEX Pfn)
enum _PFN_FLAGS PFN_FLAGS
struct _POOL_HEADER * PPOOL_HEADER
MTSTATUS MmCreateSection(OUT PHANDLE SectionHandle, IN struct _FILE_OBJECT *FileObject)
MTSTATUS MmCreatePeb(IN PEPROCESS Process, OUT void **OutPeb, OUT void **OutBasicMtdllTypes)
MTSTATUS MmInitSections(void)
struct _SPINLOCK SPINLOCK
bool MmPfnDatabaseInitialized
uint64_t MmTotalUsableMemory
MM_PFN_DATABASE PfnDatabase
uintptr_t MmNonPagedPoolEnd
uintptr_t MmNonPagedPoolStart
uintptr_t MmPagedPoolStart
uintptr_t MmSystemRangeStart
uintptr_t MmUserProbeAddress
uintptr_t MmUserStartAddress
uintptr_t MmHighestUserAddress
MM_PFN_LIST StandbyPageList
volatile size_t AvailablePages
MM_PFN_LIST ModifiedPageList
volatile size_t TotalReserved
MM_PFN_LIST ZeroedPageList
struct _DOUBLY_LINKED_LIST ListEntry
uint64_t EntryPointOffset
struct _FILE_OBJECT * FileObject
MM_SUBSECTION WholeFileSection
struct _MMPTE::@172372265215056352375070220246156106027174106113::@200357034104227323320222006243127050212100105247 Hard
struct _MMPTE::@172372265215056352375070220246156106027174106113::@277354034164206104264133322054061025100052052376 Soft
struct _FILE_OBJECT * File
struct _EPROCESS * OwningProcess
struct _MMVAD * LeftChild
struct _MMVAD * RightChild
union _PFN_ENTRY::@217024126340164016372152071216274230164113211246 Descriptor
volatile uint32_t RefCount
struct _PFN_ENTRY::@217024126340164016372152071216274230164113211246::@301110335271023021153236134322146064331241142124 Mapping
struct _DOUBLY_LINKED_LIST ListEntry
volatile uint64_t FreeCount
volatile uint64_t TotalBlocks
SINGLE_LINKED_LIST FreeListHead
SINGLE_LINKED_LIST FreeListEntry
union _POOL_HEADER::@321115223011072362277073135231015025151337071364 Metadata
uint64_t lib_name_absolute
uint64_t func_name_absolute
uint64_t iat_addr_absolute
uint64_t PreferredImageBase