mmio.c
/*++

Module Name:

    mmio.c

Purpose:

    This translation unit implements the MMIO routines used to interact with physical hardware: contiguous physical memory allocation and release, and I/O space mapping.

Author:

    slep (Matanel) 2025.

Revision History:

--*/

#include "../../includes/mm.h"
#include "../../includes/me.h"
#include "../../assert.h"


bool
MiCheckForContigiousMemory (
    IN void* StartAddress,
    IN size_t NumberOfBytes
)

/*++

    Routine description:

        Checks whether the virtual range [StartAddress, StartAddress + NumberOfBytes) is backed by contiguous physical memory.

    Arguments:

        [IN] void* StartAddress - The base address to check.
        [IN] size_t NumberOfBytes - The number of contiguous bytes to check.

    Return Values:

        True if contiguous, false otherwise.

--*/

{
    // Assertions & Declarations
    assert(NumberOfBytes > 0);
    assert(StartAddress != NULL);
    if (!NumberOfBytes || !StartAddress) return false;

    size_t AmtPages = BYTES_TO_PAGES(NumberOfBytes);
    uintptr_t CurrentAddress = (uintptr_t)StartAddress;

    // Get the first PFN.
    PMMPTE CurrentPte = MiGetPtePointer(CurrentAddress);

    // Check if the PTE exists and is valid before translating.
    if (!CurrentPte || !CurrentPte->Hard.Present) return false;

    PAGE_INDEX StartPfn = MiTranslatePteToPfn(CurrentPte);
    if (StartPfn == PFN_ERROR) return false;

    // Loop from i = 1 (we already checked the first page).
    for (size_t i = 1; i < AmtPages; i++) {

        // Advance the VA.
        CurrentAddress += VirtualPageSize;
        CurrentPte = MiGetPtePointer(CurrentAddress);

        // Check if the page is even present.
        if (!CurrentPte || !CurrentPte->Hard.Present) return false;

        PAGE_INDEX CurrentPfn = MiTranslatePteToPfn(CurrentPte);

        // If the current PFN isn't adjacent to the previous one, the range isn't contiguous.
        if (CurrentPfn != (StartPfn + i)) {
            return false;
        }
    }

    return true;
}
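
/*
    Illustrative usage sketch (not part of this module): a hypothetical caller that
    only hands a pool allocation to a DMA engine when its backing frames are
    physically contiguous. The buffer name, size and pool tag below are invented.
*/
#if 0
    void* DmaBuffer = MmAllocatePoolWithTag(NonPagedPool, 0x2000, 'oimM');

    if (DmaBuffer && MiCheckForContigiousMemory(DmaBuffer, 0x2000)) {
        // The two pages are adjacent in physical memory; safe to program the device with them.
    } else {
        // Not contiguous (or the allocation failed); fall back to MmAllocateContigiousMemory.
    }
#endif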

void*
MmAllocateContigiousMemory (
    IN size_t NumberOfBytes,
    IN uint64_t HighestAcceptableAddress
)

/*++

    Routine description:

        Allocates contiguous physical memory pages and maps them (used for DMA).

    Arguments:

        [IN] size_t NumberOfBytes - The number of contiguous bytes to allocate.
        [IN] uint64_t HighestAcceptableAddress - The highest acceptable physical address for the contiguous range (used for drivers whose devices cannot address the full 64-bit physical address space).

    Return Values:

        Base virtual address of the allocated memory, or NULL on failure.

    Notes:

        This will likely cause fragmentation and is very expensive, as it iterates O(n) over the PFN database. Use sparingly.

--*/

{
    // According to MSDN this must be satisfied (this isn't NT compatible, but it follows its rules).
    if (MeGetCurrentIrql() > DISPATCH_LEVEL) return NULL;

    // Declarations
    size_t pageCount = BYTES_TO_PAGES(NumberOfBytes);
    PAGE_INDEX MaxPfn = PPFN_TO_INDEX(PHYSICAL_TO_PPFN(HighestAcceptableAddress));
    size_t ConsecutiveFound = 0;
    IRQL DbIrql;
    PAGE_INDEX StartIndex = 0;
    void* BaseAddress = NULL; // NULL initially, unless enough pages are found.

    /* FIXME NonPagedPoolCacheAligned type (that actually returns a page-aligned address), since AHCI wanted & 0x3FF for alignment.
    // First, try to allocate from the NonPagedPool; if it returns a contiguous physical memory address, we are lucky! (If we reach MiRefillPool we are less lucky, it's actually worse...)
    if (HighestAcceptableAddress == UINT64_T_MAX) {
        BaseAddress = MmAllocatePoolWithTag(NonPagedPool, NumberOfBytes, 'mCmM');

        if (BaseAddress) {
            if (MiCheckForContigiousMemory(BaseAddress, NumberOfBytes)) {
                // It's physically contiguous!
                return BaseAddress;
            }
            else {
                // It's not. Free the allocated memory.
                MmFreePool(BaseAddress);
                BaseAddress = NULL;
            }
        }
    }
    */

    // Acquire the global DB lock so the contiguous pages don't get stolen from us.
    MsAcquireSpinlock(&PfnDatabase.PfnDatabaseLock, &DbIrql);

    for (PAGE_INDEX i = 0; i < PfnDatabase.TotalPageCount; i++) {
        // Check bounds.
        if (i >= MaxPfn) break;

        PPFN_ENTRY pfn = &PfnDatabase.PfnEntries[i];

        // Is this page a candidate?
        bool isCandidate = (pfn->State == PfnStateFree || pfn->State == PfnStateZeroed || pfn->State == PfnStateStandby);

        if (isCandidate) {
            if (ConsecutiveFound == 0) {
                StartIndex = i;
            }
            ConsecutiveFound++;
        }
        else {
            ConsecutiveFound = 0;
        }

        // Found a large enough block?
        if (ConsecutiveFound == pageCount) {
            // We found a range! Now we must claim the pages.
            bool first = true;
            for (PAGE_INDEX j = 0; j < pageCount; j++) {
                PPFN_ENTRY pageToClaim = &PfnDatabase.PfnEntries[StartIndex + j];

                // Remove the page from whatever list it is currently in.
                MiUnlinkPageFromList(pageToClaim);

                // Mark it as active.
                pageToClaim->State = PfnStateActive;
                pageToClaim->RefCount = 1;
                pageToClaim->Flags = PFN_FLAG_LOCKED_FOR_IO;

                // Clear mapping info.
                pageToClaim->Descriptor.Mapping.PteAddress = NULL;
                pageToClaim->Descriptor.Mapping.Vad = NULL;

                // Map the physical address through the direct-map offset.
                uintptr_t phys = PPFN_TO_PHYSICAL_ADDRESS(pageToClaim);
                uintptr_t virt = (phys + PhysicalMemoryOffset);

                PMMPTE pte = MiGetPtePointer(virt);
                assert(pte != NULL);

                // Set the return value to the first address.
                if (first) {
                    first = false;
                    BaseAddress = (void*)virt;
                }

                // Write-through is set; we want writes flushed to main memory immediately.
                MI_WRITE_PTE(pte, virt, phys, PAGE_PRESENT | PAGE_RW | PAGE_PWT);
            }
            InterlockedAddU64(&PfnDatabase.TotalReserved, pageCount);
            // Break out of the 'i' loop.
            break;
        }
    }

    MsReleaseSpinlock(&PfnDatabase.PfnDatabaseLock, DbIrql);
    // This could be NULL if we didn't find a contiguous range, or a valid pointer to the start of the block (mapped through PhysicalMemoryOffset).
    return BaseAddress;
}
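
/*
    Illustrative usage sketch (hypothetical caller): allocating a physically
    contiguous descriptor table for a DMA controller limited to 32-bit addresses.
    Recovering the physical base by subtracting PhysicalMemoryOffset relies on the
    direct mapping this routine establishes; the names below are invented.
*/
#if 0
    size_t TableBytes = 0x1000;
    void* Table = MmAllocateContigiousMemory(TableBytes, 0xFFFFFFFFull);

    if (Table) {
        uintptr_t TablePhys = (uintptr_t)Table - PhysicalMemoryOffset;
        // Program TablePhys into the controller's base-address register here.
    }
#endif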

void
MmFreeContigiousMemory (
    IN void* BaseAddress,
    IN size_t NumberOfBytes
)

/*++

    Routine description:

        Releases contiguous physical memory allocated by the MmAllocateContigiousMemory routine.

    Arguments:

        [IN] void* BaseAddress - Base virtual address of the allocated memory, as returned by the allocation routine.
        [IN] size_t NumberOfBytes - Number of bytes allocated.

    Return Values:

        None.

--*/

{
    // Declarations
    IRQL DbIrql;
    size_t pageCount = BYTES_TO_PAGES(NumberOfBytes);
    uintptr_t CurrentAddress = (uintptr_t)BaseAddress;

    // Check if the base address came from an NPG pool allocation.
    if (BaseAddress >= (void*)MI_NONPAGED_POOL_BASE && BaseAddress <= (void*)MI_NONPAGED_POOL_END) {
        MmFreePool(BaseAddress);
        return;
    }

    // Just unmap each page and return the PFNs to the DB.
    MsAcquireSpinlock(&PfnDatabase.PfnDatabaseLock, &DbIrql);

    for (size_t i = 0; i < pageCount; i++) {
        // Retrieve the PTE for the current VA.
        PMMPTE pte = MiGetPtePointer(CurrentAddress);
        if (!pte) break;
        // Retrieve the PFN for the current PTE.
        PAGE_INDEX PfnIndex = MiTranslatePteToPfn(pte);
        // Unmap the PTE.
        MiUnmapPte(pte);
        // Release the PFN back.
        MiReleasePhysicalPage(PfnIndex);

        // Advance the VA by VirtualPageSize.
        CurrentAddress += VirtualPageSize;
    }

    MsReleaseSpinlock(&PfnDatabase.PfnDatabaseLock, DbIrql);
}
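
/*
    Illustrative usage sketch: releasing the contiguous buffer from the previous
    example once the device no longer references it. The byte count must match the
    original request so the same number of pages is unmapped and released.
*/
#if 0
    MmFreeContigiousMemory(Table, TableBytes);
    Table = NULL;
#endif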

void*
MmMapIoSpace (
    IN uintptr_t PhysicalAddress,
    IN size_t NumberOfBytes,
    IN MEMORY_CACHING_TYPE CacheType
)

/*++

    Routine description:

        Maps the given physical address range of NumberOfBytes to nonpaged system space.

    Arguments:

        [IN] uintptr_t PhysicalAddress - Specifies the starting physical address of the I/O range to be mapped.
        [IN] size_t NumberOfBytes - Specifies a value greater than zero, indicating the number of bytes to be mapped.
        [IN] MEMORY_CACHING_TYPE CacheType - Specifies the cache attribute to use when mapping the physical address range.

    Return Values:

        Base virtual address mapped to the base physical address, or NULL on failure.

--*/

{
    // Declarations
    void* BaseAddress = NULL;
    size_t NumberOfPages = BYTES_TO_PAGES(NumberOfBytes);
    uint64_t CacheFlags = MiCacheToFlags(CacheType);

    // Runtime Assertions
    assert(NumberOfBytes > 0);

    // Get a space reservation for the requested number of bytes. (We could also use PhysicalMemoryOffset, but then the caller would have to guarantee that the given PhysicalAddress is NOT already mapped, and I don't have time for their shenanigans.)
    uintptr_t VA = MiAllocatePoolVa(NonPagedPool, NumberOfBytes);
    if (!VA) return NULL;

    // Good, now all we do is map, easy as that.
    uintptr_t CurrentVA = VA;
    uintptr_t CurrentPhys = PhysicalAddress;
    for (size_t i = 0; i < NumberOfPages; i++) {
        PMMPTE pte = MiGetPtePointer(CurrentVA);
        assert(pte != NULL);
        if (!pte) goto failure;

        // Write the PTE with the appropriate cache flags (requires PAT, enabled in MmInitSystem).
        MI_WRITE_PTE(pte, CurrentVA, CurrentPhys, PAGE_PRESENT | PAGE_RW | CacheFlags);

        // Advance the current addresses.
        CurrentPhys += PhysicalFrameSize;
        CurrentVA += VirtualPageSize;
    }

    BaseAddress = (void*)VA;
    return BaseAddress;

failure:
    if (VA) {
        MiFreePoolVaContiguous(VA, NumberOfBytes, NonPagedPool);
    }

    return NULL;
}
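
/*
    Illustrative usage sketch (hypothetical driver code): mapping a device register
    window, e.g. an AHCI ABAR read from PCI configuration space, as uncached MMIO.
    'AbarPhysical' and the 'MemoryNonCached' enumerator are assumed names, not
    identifiers defined by this module.
*/
#if 0
    uintptr_t AbarPhysical = 0xFEBF0000;    // hypothetical BAR value read from PCI config space
    volatile uint32_t* Hba = (volatile uint32_t*)MmMapIoSpace(AbarPhysical, 0x1100, MemoryNonCached);

    if (Hba) {
        uint32_t Capabilities = Hba[0];     // read the first 32-bit register of the window
    }
#endif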