My Project
Loading...
Searching...
No Matches
va.c
Go to the documentation of this file.
1/*++
2
3Module Name:
4
5 va.c
6
7Purpose:
8
9 This translation unit implements the kernel's virtual address pool allocators (nonpaged and paged bitmap management).
10
11Author:
12
13 slep (Matanel) 2025.
14
15Revision History:
16
17--*/
18
19#include "../../includes/mm.h"
20#include "../../includes/ps.h"
21
// NONPAGED ----
// Bitmap tracking nonpaged-pool VA pages: 1 bit per page, 0 = free, 1 = allocated.
// Points at the fixed MI_NONPAGED_BITMAP_BASE mapping once MiInitializePoolVaSpace runs.
static uint64_t* g_NonpagedPoolVaBitmap;

// Hint for next search: bit index just past the most recent nonpaged allocation.
static volatile uint64_t g_NonpagedPoolHintIndex = 0;

// PAGED ----
// Bitmap tracking paged-pool VA pages (same encoding as the nonpaged bitmap).
static uint64_t* g_PagedPoolVaBitmap;

// Hint for next search: bit index just past the most recent paged allocation.
static volatile uint64_t g_PagedPoolHintIndex = 0;
33
34bool
36 void
37)
38
39/*++
40
41 Routine description:
42
43 Initializes the nonpaged & paged pool virtual address bitmap.
44
45 Arguments:
46
47 None.
48
49 Return Values:
50
51 True or False based on succession.
52
53--*/
54
55{
56 // Initialize the nonpaged bitmap first.
57 uintptr_t currNpgBitmapVa = MI_NONPAGED_BITMAP_BASE;
58 uintptr_t currPgBitmapVa = MI_PAGED_BITMAP_BASE;
59
60 for (size_t i = 0; i < MI_NONPAGED_BITMAP_PAGES_NEEDED; i++) {
61 // Request a physical page.
63 if (pfn == PFN_ERROR) return false; // Would bugcheck, no need for physical page release back. (loop unroll)
64
65 // Get the PTE ptr for the curr va.
66 PMMPTE pte = MiGetPtePointer(currNpgBitmapVa);
67 if (!pte) return false;
68 // Get the physical address of the PFN.
69 uint64_t phys = PPFN_TO_PHYSICAL_ADDRESS(INDEX_TO_PPFN(pfn));
70 // Map it.
71 MI_WRITE_PTE(pte, currNpgBitmapVa, phys, PAGE_PRESENT | PAGE_RW);
72
73 // Set the PFNs states.
74 PPFN_ENTRY pfnEntry = INDEX_TO_PPFN(pfn);
75 pfnEntry->State = PfnStateActive;
76 pfnEntry->Flags = PFN_FLAG_NONPAGED;
77 pfnEntry->Descriptor.Mapping.PteAddress = pte;
78 pfnEntry->Descriptor.Mapping.Vad = NULL; // Not VAD-backed
79
80 // Advance VA by 4KiB.
81 currNpgBitmapVa += VirtualPageSize;
82 }
83
84 for (size_t i = 0; i < MI_PAGED_BITMAP_PAGES_NEEDED; i++) {
85 // Request a physical page.
87 if (pfn == PFN_ERROR) return false; // Would bugcheck, no need for physical page release back. (loop unroll)
88
89 // Get the PTE ptr for the curr va.
90 PMMPTE pte = MiGetPtePointer(currPgBitmapVa);
91 if (!pte) return false;
92 // Get the physical address of the PFN.
93 uint64_t phys = PPFN_TO_PHYSICAL_ADDRESS(INDEX_TO_PPFN(pfn));
94 // Map it.
95 MI_WRITE_PTE(pte, currNpgBitmapVa, phys, PAGE_PRESENT | PAGE_RW);
96
97 // Set the PFNs states.
98 PPFN_ENTRY pfnEntry = INDEX_TO_PPFN(pfn);
99 pfnEntry->State = PfnStateActive;
100 pfnEntry->Flags = PFN_FLAG_NONPAGED;
101 pfnEntry->Descriptor.Mapping.PteAddress = pte;
102 pfnEntry->Descriptor.Mapping.Vad = NULL; // Not VAD-backed
103
104 // Advance VA by 4KiB.
105 currPgBitmapVa += VirtualPageSize;
106 }
107
108 g_NonpagedPoolVaBitmap = (uint64_t*)MI_NONPAGED_BITMAP_BASE;
109 g_PagedPoolVaBitmap = (uint64_t*)MI_PAGED_BITMAP_BASE;
110
111 // Both bitmaps are mapped, begin building them.
112 // Initialize both bitmaps to FREE.
113 size_t nonpaged_bitmap_bytes = (size_t)NONPAGED_POOL_VA_BITMAP_QWORDS * sizeof(uint64_t);
114 size_t paged_bitmap_bytes = (size_t)PAGED_POOL_VA_BITMAP_QWORDS * sizeof(uint64_t);
115
116 kmemset(g_NonpagedPoolVaBitmap, 0, nonpaged_bitmap_bytes);
117 kmemset(g_PagedPoolVaBitmap, 0, paged_bitmap_bytes);
118
119 // Initialize hints.
120 g_NonpagedPoolHintIndex = 0;
121 g_PagedPoolHintIndex = 0;
122
123 // Both bitmaps fully setupped
124 return true;
125}
126
127// Testing and applying functions.
128
bool
MiBitmapTestBit (
    uint64_t* bitmap,
    size_t bit
)

// Description: Tests a single bit in the provided bitmap.
// Return Values: True if the bit is set, false otherwise.

{
    size_t word = bit / 64;               // Index of the containing qword.
    uint64_t mask = 1ULL << (bit % 64);   // Bit's position inside that qword.

    // Snapshot the whole qword atomically, then test our bit against it.
    uint64_t snapshot = InterlockedFetchU64((volatile uint64_t*)&bitmap[word]);
    return (snapshot & mask) != 0;
}
147
bool
MiBitmapTestAndSetBitLocked (
    uint64_t* bitmap,
    size_t bit
)

// Description: Atomically sets the bit and reports whether this caller is the
// one who flipped it from clear to set (i.e. whether the claim succeeded).

{
    size_t word = bit / 64;
    uint64_t mask = 1ULL << (bit % 64);

    // fetch-and-OR returns the pre-update qword; if our bit was already set
    // there, another CPU owns it and we report failure.
    uint64_t before = __sync_fetch_and_or(&bitmap[word], mask);
    return !(before & mask);
}
168
void
MiBitmapClearBitLocked (
    uint64_t* bitmap,
    size_t bit
)

// Description: Atomically clears one bit in the bitmap.
// Return Values: None.

{
    uint64_t mask = 1ULL << (bit % 64);

    // ANDing with the complement drops just our bit, leaving the rest intact.
    InterlockedAndU64((volatile uint64_t*)&bitmap[bit / 64], ~mask);
}
185
187uintptr_t
189 uintptr_t poolBase,
190 size_t index
191)
192
193// Converts a pool base index to its corresponding virtual address.
194
195{
196 return poolBase + (index * VirtualPageSize);
197}
198
200size_t
202 uintptr_t poolBase,
203 uintptr_t va
204)
205
206// Converts a VA into its corresponding Pool index.
207
208{
209 return (va - poolBase) / VirtualPageSize; // The caller must ensure the VA is in range.
210}
211
212uintptr_t
214 IN POOL_TYPE PoolType,
215 IN size_t NumberOfBytes
216)
217
218/*++
219
220 Routine description:
221
222 Searches for a free VA (NumberOfBytes size) in the pool, and returns it.
223
224 Arguments:
225
226 [IN] POOL_TYPE PoolType - The type of pool to return the VA for.
227 [IN] size_t NumberOfBytes - The amount of contingious VA bytes to find for. (rounds up to next page)
228
229 Return Values:
230
231 The VA on success, otherwise 0 on failure.
232
233 Notes:
234
235 The returned VA is not mapped to any physical memory.
236
237--*/
238
239{
240 // Declarations for mixed pools
241 size_t total_pages, total_qwords;
242 size_t hint;
243 uint64_t* bitmap;
244 uintptr_t poolBase;
245 volatile uint64_t* hintIndexPtr;
246
247 // Calculate pages needed, rounding up.
248 size_t NumberOfPages = BYTES_TO_PAGES(NumberOfBytes);
249 if (NumberOfPages == 0) return 0;
250
251 // Set-up pool specific parameters.
252 if (PoolType == NonPagedPool) {
253 total_pages = NONPAGED_POOL_VA_TOTAL_PAGES;
254 hint = (size_t)InterlockedFetchU64(&g_NonpagedPoolHintIndex);
255 bitmap = g_NonpagedPoolVaBitmap;
256 poolBase = MI_NONPAGED_POOL_BASE;
257 hintIndexPtr = &g_NonpagedPoolHintIndex;
258 }
259 else if (PoolType == PagedPool) {
260 total_pages = PAGED_POOL_VA_TOTAL_PAGES;
261 hint = (size_t)InterlockedFetchU64(&g_PagedPoolHintIndex);
262 bitmap = g_PagedPoolVaBitmap;
263 poolBase = MI_PAGED_POOL_BASE;
264 hintIndexPtr = &g_PagedPoolHintIndex;
265 }
266 else {
267 // Invalid parameter.
268 return 0;
269 }
270
271 total_qwords = total_pages / 64;
272
273 // SINGLE PAGE ALLOCATION
274 if (NumberOfPages == 1) {
275 size_t start_q = (hint / 64) % total_qwords;
276
277 // Scan qword-by-qword
278 for (size_t i = 0; i < total_qwords; i++) {
279 size_t q_idx = (start_q + i) % total_qwords;
280
281 // rescan loop
282 while (true)
283 {
284 uint64_t qword = bitmap[q_idx];
285 if (qword == 0xFFFFFFFFFFFFFFFFULL) {
286 break; // This qword is full, move to the next q_idx
287 }
288
289 uint64_t inverted_qword = ~qword;
290 unsigned long bit_index_in_qword = __builtin_ctzll(inverted_qword);
291 size_t global_bit_idx = (q_idx * 64) + bit_index_in_qword;
292
293 if (MiBitmapTestAndSetBitLocked(bitmap, global_bit_idx)) {
294 // We successfully claimed it!
295 InterlockedExchangeU64(hintIndexPtr, (uint64_t)(global_bit_idx + 1));
296 return MiIndexToVa(poolBase, global_bit_idx);
297 }
298 // If we failed, another CPU beat us. The while(true) loop
299 // will just retry on the same qword.
300 }
301 }
302 return 0; // No free VA pages found
303 }
304
305 // CONTINGUOUS PAGE ALLOCATION
306 size_t start_idx = hint % total_pages;
307 size_t contiguous_found = 0;
308 size_t start_of_run_idx = 0;
309
310 // This loop must check every bit.
311 for (size_t i = 0; i < total_pages; i++) {
312 size_t current_idx = (start_idx + i) % total_pages;
313
314 // We can't use TestAndSet yet. Just read the bit.
315 if (MiBitmapTestBit(bitmap, current_idx)) {
316 // This bit is set. Reset our contiguous run.
317 contiguous_found = 0;
318 continue;
319 }
320
321 // Free bit.
322 if (contiguous_found == 0) {
323 // This is the start of a potential run
324 start_of_run_idx = current_idx;
325 }
326 contiguous_found++;
327
328 if (current_idx < start_of_run_idx) {
329 contiguous_found = 0; // Wrapped around, reset
330 continue;
331 }
332
333 // Do we have enough pages?
334 if (contiguous_found == NumberOfPages) {
335 // We found a potential run from 'start_of_run_idx' for NumberOfPages, attempt to claim all of them.
336
337 size_t j = 0;
338 for (; j < NumberOfPages; j++) {
339 size_t idx_to_claim = start_of_run_idx + j;
340
341 if (!MiBitmapTestAndSetBitLocked(bitmap, idx_to_claim)) {
342 // WE FAILED! Another CPU grabbed a bit in our run.
343 // We must roll back all the bits we *did* claim.
344 for (size_t k = 0; k < j; k++) {
345 MiBitmapClearBitLocked(bitmap, start_of_run_idx + k);
346 }
347
348 // Reset contiguous_found and continue the outer search
349 contiguous_found = 0;
350 break; // Break from this 'j' loop
351 }
352 }
353
354 // If 'j' == NumberOfPages, it means we successfully claimed ALL bits
355 if (j == NumberOfPages) {
356 InterlockedExchangeU64(hintIndexPtr, (start_of_run_idx + NumberOfPages));
357 return MiIndexToVa(poolBase, start_of_run_idx);
358 }
359 // If we're here, we failed the claim and rolled back, outer loop will continue.
360 }
361 }
362
363 return 0; // No contiguous range found
364}
365
366void
368 IN uintptr_t va,
369 IN size_t NumberOfBytes,
370 IN POOL_TYPE PoolType
371)
372
373/*++
374
375 Routine description:
376
377 Frees a VA in the bitmap.
378
379 Arguments:
380
381 [IN] uintptr_t va - The virtual address to free in the bitmap.
382 [IN] size_t NumberOfBytes - Number of bytes used to allocate from the VA allocation.
383 [IN] POOL_TYPE PoolType - The type of pool to free the VA for.
384
385 Return Values:
386
387 None.
388
389--*/
390
391{
392 size_t NumberOfPages = BYTES_TO_PAGES(NumberOfBytes);
393 uint64_t* bitmap;
394 uintptr_t poolBase;
395 uintptr_t poolEnd;
396
397 if (PoolType == NonPagedPool) {
398 poolBase = MI_NONPAGED_POOL_BASE;
399 poolEnd = MI_NONPAGED_POOL_END;
400 bitmap = g_NonpagedPoolVaBitmap;
401 }
402 else {
403 poolBase = MI_PAGED_POOL_BASE;
404 poolEnd = MI_PAGED_POOL_END;
405 bitmap = g_PagedPoolVaBitmap;
406 return;
407 }
408
409 if (va < poolBase || va >= poolEnd) return;
410
411 size_t start_idx = MiVaToIndex(poolBase, va);
412
413 // Loop and free all bits in the range
414 for (size_t i = 0; i < NumberOfPages; i++) {
415 MiBitmapClearBitLocked(bitmap, start_idx + i);
416 }
417}
#define FORCEINLINE
Definition annotations.h:22
#define IN
Definition annotations.h:7
FORCEINLINE uint64_t InterlockedAndU64(volatile uint64_t *target, uint64_t value)
Definition atomic.h:134
FORCEINLINE uint64_t InterlockedFetchU64(volatile uint64_t *target)
Definition atomic.h:213
FORCEINLINE uint64_t InterlockedExchangeU64(volatile uint64_t *target, uint64_t value)
Definition atomic.h:42
PMMPTE MiGetPtePointer(IN uintptr_t va)
Definition map.c:76
@ PAGE_RW
Definition mm.h:272
@ PAGE_PRESENT
Definition mm.h:268
#define PAGED_POOL_VA_TOTAL_PAGES
Definition mm.h:170
@ PfnStateZeroed
Definition mm.h:243
@ PfnStateActive
Definition mm.h:239
#define NONPAGED_POOL_VA_BITMAP_QWORDS
Definition mm.h:173
@ NonPagedPool
Definition mm.h:316
@ PagedPool
Definition mm.h:317
#define PAGED_POOL_VA_BITMAP_QWORDS
Definition mm.h:174
#define MI_PAGED_BITMAP_BASE
Definition mm.h:187
enum _POOL_TYPE POOL_TYPE
struct _MMPTE * PMMPTE
#define NONPAGED_POOL_VA_TOTAL_PAGES
Definition mm.h:169
#define MI_PAGED_POOL_BASE
Definition mm.h:194
#define PPFN_TO_PHYSICAL_ADDRESS(PPFN)
Definition mm.h:117
uint64_t PAGE_INDEX
Definition mm.h:232
FORCEINLINE void * kmemset(void *dest, int64_t val, uint64_t len)
Definition mm.h:540
#define MI_NONPAGED_POOL_BASE
Definition mm.h:191
#define MI_PAGED_BITMAP_PAGES_NEEDED
Definition mm.h:178
#define BYTES_TO_PAGES(Bytes)
Definition mm.h:147
#define MI_NONPAGED_POOL_END
Definition mm.h:192
#define PFN_ERROR
Definition mm.h:208
#define MI_PAGED_POOL_END
Definition mm.h:195
#define MI_NONPAGED_BITMAP_PAGES_NEEDED
Definition mm.h:177
#define VirtualPageSize
Definition mm.h:53
#define INDEX_TO_PPFN(Index)
Definition mm.h:62
#define MI_WRITE_PTE(_PtePointer, _Va, _Pa, _Flags)
Definition mm.h:90
struct _PFN_ENTRY * PPFN_ENTRY
@ PFN_FLAG_NONPAGED
Definition mm.h:251
#define MI_NONPAGED_BITMAP_BASE
Definition mm.h:184
PAGE_INDEX MiRequestPhysicalPage(IN PFN_STATE ListType)
Definition pfn.c:325
union _PFN_ENTRY::@217024126340164016372152071216274230164113211246 Descriptor
struct _PFN_ENTRY::@217024126340164016372152071216274230164113211246::@301110335271023021153236134322146064331241142124 Mapping
uint8_t State
Definition mm.h:430
PMMPTE PteAddress
Definition mm.h:441
struct _MMVAD * Vad
Definition mm.h:440
uint8_t Flags
Definition mm.h:431
FORCEINLINE size_t MiVaToIndex(uintptr_t poolBase, uintptr_t va)
Definition va.c:201
FORCEINLINE void MiBitmapClearBitLocked(uint64_t *bitmap, size_t bit)
Definition va.c:171
FORCEINLINE bool MiBitmapTestAndSetBitLocked(uint64_t *bitmap, size_t bit)
Definition va.c:150
FORCEINLINE uintptr_t MiIndexToVa(uintptr_t poolBase, size_t index)
Definition va.c:188
uintptr_t MiAllocatePoolVa(IN POOL_TYPE PoolType, IN size_t NumberOfBytes)
Definition va.c:213
FORCEINLINE bool MiBitmapTestBit(uint64_t *bitmap, size_t bit)
Definition va.c:131
void MiFreePoolVaContiguous(IN uintptr_t va, IN size_t NumberOfBytes, IN POOL_TYPE PoolType)
Definition va.c:367
bool MiInitializePoolVaSpace(void)
Definition va.c:35