pool.c
1/*++
2
3Module Name:
4
5 pool.c
6
7Purpose:
8
9 This translation unit contains the implementation of pool allocations in the kernel.
10
11Author:
12
13 slep (Matanel) 2025.
14
15Revision History:
16
17--*/
18
19#include "../../includes/mm.h"
20#include "../../includes/me.h"
21#include "../../includes/mg.h"
22#include "../../assert.h"
23
24// Global pool descriptor. Its free list can hold blocks of any size.
25POOL_DESCRIPTOR GlobalPool;
26
27#define POOL_TYPE_GLOBAL 9999
28#define POOL_TYPE_PAGED 1234
29
30uintptr_t MmNonPagedPoolStart = 0;
31uintptr_t MmNonPagedPoolEnd = 0;
32uintptr_t MmPagedPoolStart = 0;
33uintptr_t MmPagedPoolEnd = 0;
34
35MTSTATUS
36MiInitializePoolSystem(
37 void
38)
39
40/*++
41
42 Routine description:
43
44 Initializes the Pool Allocation System of the kernel.
45
46 Arguments:
47
48 None.
49
50 Return Values:
51
52 MTSTATUS Status Code.
53
54--*/
55
56{
57 PPROCESSOR cpu = MeGetCurrentProcessor();
58 if (!cpu) return MT_NOT_FOUND;
59
60 size_t base = 32; // Start size
61 for (int i = 0; i < MAX_POOL_DESCRIPTORS; i++) {
62 PPOOL_DESCRIPTOR desc = &cpu->LookasidePools[i];
63
64 // Sizes grow in powers of two (32, 64, 128, 256, ..., up to 2048), each plus sizeof(POOL_HEADER).
65 size_t blockSize = (base << i) + sizeof(POOL_HEADER);
66 desc->BlockSize = blockSize;
67 desc->FreeCount = 0;
68 desc->FreeListHead.Next = NULL;
69 desc->TotalBlocks = 0;
70 desc->PoolLock.locked = 0;
71 }
72
77 return MT_SUCCESS;
78}
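/*
 * Illustrative worked example (not part of pool.c): the loop above yields
 * per-CPU lookaside slabs whose payload sizes double from 32 up to the
 * 2048-byte cap noted in the comment, which implies MAX_POOL_DESCRIPTORS
 * covers the sequence below (an assumption inferred from that cap):
 *
 *   i           : 0    1    2     3     4     5      6
 *   base << i   : 32   64   128   256   512   1024   2048
 *   BlockSize   : (base << i) + sizeof(POOL_HEADER)
 *
 * A request for N bytes is served by the first slab whose BlockSize is at
 * least N + sizeof(POOL_HEADER); larger requests fall through to
 * MiAllocateLargePool() (see MmAllocatePoolWithTag below).
 */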
79
80static
81bool
82MiRefillPool(
83 PPOOL_DESCRIPTOR Desc,
84 size_t PoolIndex
85)
86
87/*++
88
89 Routine description:
90
91 Refills the specified pool with a block of its size.
92
93 Arguments:
94
95 [IN] PPOOL_DESCRIPTOR Desc - Pointer to descriptor.
96 [IN] size_t PoolIndex - Index of the slab in the CPU lookaside buffer.
97
98 Return Values:
99
100 True if the refill (and any required allocation) succeeded, false otherwise.
101
102--*/
103
104{
105 // Before allocating a VA or another PFN, check the global pool first to see whether it has a free 4KiB block.
106 IRQL oldIrql;
107 uintptr_t PageVa = 0;
108 size_t HeaderBlockSize = 0;
109 size_t Iterations = 0;
110
111 // Acquire the spinlock for atomicity.
112 MsAcquireSpinlock(&GlobalPool.PoolLock, &oldIrql);
113
114 // Initialize the local list so that blocks taken from the global pool can be pushed back rather than leaked.
115 SINGLE_LINKED_LIST localList;
116 localList.Next = NULL;
117
118 while (GlobalPool.FreeCount) {
119 // As long as we have a free block in the global pool, we check it.
120 PSINGLE_LINKED_LIST list = GlobalPool.FreeListHead.Next;
121 if (list == NULL) break; // FreeCount was wrong, but that's ok
122 GlobalPool.FreeListHead.Next = list->Next;
123 PPOOL_HEADER header = CONTAINING_RECORD(list, POOL_HEADER, Metadata.FreeListEntry);
124 GlobalPool.FreeCount--;
125
126 if (header->PoolCanary != 'BEKA') {
127 MeBugCheckEx(
128 MEMORY_CORRUPT_HEADER,
129 (void*)header,
130 (void*)__read_rip(),
131 NULL,
132 NULL
133 );
134 }
135
136 if (Desc->BlockSize > header->Metadata.BlockSize) {
137 // The block taken from the global pool is smaller than the required refill size; stash it on the local list so it can be pushed back later.
138 header->Metadata.FreeListEntry.Next = localList.Next;
139 localList.Next = &header->Metadata.FreeListEntry;
140 Iterations++;
141 continue;
142 }
143
144 // The block is suitable. The carving loop below will overwrite this header data (this is why we don't add sizeof(POOL_HEADER) here).
145 PageVa = (uintptr_t)header;
146 HeaderBlockSize = header->Metadata.BlockSize;
147 break;
148 }
149
150 // Refill back the pool.
151 while (Iterations--) {
152 PSINGLE_LINKED_LIST entryToPushBack = localList.Next;
153 if (entryToPushBack == NULL) {
154 // Shouldn't happen if Iterations is correct, but it never hurts to check.
155 break;
156 }
157
158 localList.Next = entryToPushBack->Next;
159 entryToPushBack->Next = GlobalPool.FreeListHead.Next;
160 GlobalPool.FreeListHead.Next = entryToPushBack;
161
162 GlobalPool.FreeCount++;
163 }
164
165 // Release global pool lock.
166 MsReleaseSpinlock(&GlobalPool.PoolLock, oldIrql);
167
168 if (!PageVa) {
169 // The global pool is empty... let's allocate.
170 // Allocate a 4KiB virtual address.
171 PageVa = MiAllocatePoolVa(NonPagedPool, VirtualPageSize);
172 if (!PageVa) return false; // Out of VA Space.
173
174 // Allocate a 4KiB Physical page.
176 if (pfn == PFN_ERROR) {
178 return false;
179 }
180
181 // Map the page permanently.
182 PMMPTE pte = MiGetPtePointer((uintptr_t)PageVa);
183 uint64_t phys = PPFN_TO_PHYSICAL_ADDRESS(INDEX_TO_PPFN(pfn));
184 MI_WRITE_PTE(pte, PageVa, phys, PAGE_PRESENT | PAGE_RW);
185
186 // Update PFN metadata.
187 PPFN_ENTRY ppfn = INDEX_TO_PPFN(pfn);
188 ppfn->State = PfnStateActive;
189 ppfn->Flags = PFN_FLAG_NONPAGED;
190 ppfn->Descriptor.Mapping.PteAddress = pte;
191 ppfn->Descriptor.Mapping.Vad = NULL;
192 HeaderBlockSize = VirtualPageSize;
193 }
194
195 // Reaching here means we have a valid page (either from the global pool or a fresh allocation); now carve it up into blocks of this slab's size.
196 // Acquire the spinlock for the descriptor given before modifying its data.
197 IRQL descIrql;
198 MsAcquireSpinlock(&Desc->PoolLock, &descIrql);
199
200 // Loop from the start to the end of the page, stepping by the small block size.
201 for (size_t offset = 0; (offset + Desc->BlockSize) <= HeaderBlockSize; offset += Desc->BlockSize) {
202 // newBlock points to the start of this Desc->BlockSize chunk.
203 PPOOL_HEADER newBlock = (PPOOL_HEADER)((uint8_t*)PageVa + offset);
204
205 // Set its header metadata.
206 newBlock->Metadata.BlockSize = Desc->BlockSize;
207 newBlock->Metadata.PoolIndex = PoolIndex;
208 newBlock->PoolCanary = 'BEKA'; // Pool Canary
209 newBlock->PoolTag = 'ADIR'; // Default Tag
210
211 // Add this block to the list of the descriptor.
212 newBlock->Metadata.FreeListEntry.Next = Desc->FreeListHead.Next;
213 Desc->FreeListHead.Next = &newBlock->Metadata.FreeListEntry;
214 Desc->TotalBlocks++;
215 Desc->FreeCount++;
216 }
217
218 MsReleaseSpinlock(&Desc->PoolLock, descIrql);
219 return true;
220}
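/*
 * Carving arithmetic, as a hedged example (sizeof(POOL_HEADER) is assumed to
 * be 16 bytes here purely for illustration): refilling the 64-byte slab means
 * Desc->BlockSize = 64 + 16 = 80, so one 4096-byte page is carved into
 * 4096 / 80 = 51 blocks by the offset loop above, with the trailing
 * 4096 - 51 * 80 = 16 bytes left unused at the end of the page.
 */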
221
222static
223void*
224MiAllocateLargePool(
225 size_t NumberOfBytes,
226 uint32_t Tag
227)
228
229/*++
230
231 Routine description:
232
233 Allocates a large NonPagedPool block and returns a pointer to the start of the region.
234
235 Arguments:
236
237 [IN] size_t NumberOfBytes - Number of bytes needed to allocate.
238 [IN] uint32_t Tag - A 4-byte integer identifying the allocation; the byte order is reversed in memory (e.g. 'TSET' shows up as "TEST").
239
240 Return Values:
241
242 Pointer to the start of the allocated region, or NULL on failure.
243
244--*/
245
246{
247 // Large pool allocations are always popped from / pushed onto the global free list, since it is the only list that holds blocks larger than 2048 bytes.
248 IRQL oldIrql;
249 MsAcquireSpinlock(&GlobalPool.PoolLock, &oldIrql);
250
251 // Compute the actual allocation size in bytes.
252 size_t RequiredSize = NumberOfBytes + sizeof(POOL_HEADER);
253
254 PSINGLE_LINKED_LIST* PtrToPrevNext = &GlobalPool.FreeListHead.Next;
255 PSINGLE_LINKED_LIST list = GlobalPool.FreeListHead.Next;
256 PPOOL_HEADER foundHeader = NULL;
257
258 while (list) {
259 PPOOL_HEADER header = CONTAINING_RECORD(list, POOL_HEADER, Metadata.FreeListEntry);
260
261 if (header->PoolCanary != 'BEKA') {
262 MeBugCheckEx(
263 MEMORY_CORRUPT_HEADER,
264 (void*)header,
265 (void*)__read_rip(),
266 NULL,
267 NULL
268 );
269 }
270
271 if (header->Metadata.BlockSize >= RequiredSize) {
272 // Found a block that holds the amount of bytes we need!
273 foundHeader = header;
274
275 // Unlink it from the list.
276 *PtrToPrevNext = list->Next;
277 GlobalPool.FreeCount--;
278 break;
279 }
280
281 // Didn't find an appropriate block, move to next.
282 PtrToPrevNext = &list->Next;
283 list = list->Next;
284 }
285
286 // Release the lock.
287 MsReleaseSpinlock(&GlobalPool.PoolLock, oldIrql);
288
289 // Now check whether we found a block.
290 if (foundHeader) {
291 // Good, lets set metadata, and return it to the caller.
292 foundHeader->PoolTag = Tag;
293 return (void*)((uint8_t*)foundHeader + sizeof(POOL_HEADER));
294 }
295
296 // We didn't find a block large enough, so allocate one.
297 size_t neededPages = BYTES_TO_PAGES(RequiredSize);
298
299 // Allocate contiguous VAs.
300 uintptr_t pageVa = MiAllocatePoolVa(NonPagedPool, RequiredSize);
301
302 if (!pageVa) {
303 // We don't have enough VA space to allocate required pool.
304 return NULL;
305 }
306
307 // Loop to request PFNs, with a safeguard to unwind the loop if a failure occurs.
308 bool failure = false;
309 size_t Iterations = 0;
310
311 for (size_t i = 0; i < neededPages; i++) {
312 // Increment by 4KiB each time.
313 uint8_t* currVa = (uint8_t*)pageVa + (i * VirtualPageSize);
314
316
317 if (pfn == PFN_ERROR) {
318 // Physical page allocation failed; free the allocated VA range and unwind the loop (see the code below the loop).
319 MiFreePoolVaContiguous(pageVa, RequiredSize, NonPagedPool);
320 failure = true;
321 break;
322 }
323
324 // Map the page.
325 PMMPTE pte = MiGetPtePointer((uintptr_t)currVa);
326 uint64_t phys = PPFN_TO_PHYSICAL_ADDRESS(INDEX_TO_PPFN(pfn));
327 MI_WRITE_PTE(pte, currVa, phys, PAGE_PRESENT | PAGE_RW);
328
329 // Update PFN metadata.
330 PPFN_ENTRY ppfn = INDEX_TO_PPFN(pfn);
331 ppfn->State = PfnStateActive;
332 ppfn->Flags = PFN_FLAG_NONPAGED;
333 ppfn->Descriptor.Mapping.PteAddress = pte;
334
335 // Track the iteration count so the loop can be unwound on failure.
336 Iterations++;
337 }
338
339 // On failure, unwind the already-mapped pages using Iterations.
340 if (failure) {
341 for (size_t j = 0; j < Iterations; j++) {
342 uint8_t* vaToFree = (uint8_t*)pageVa + (j * VirtualPageSize);
343 PMMPTE pte = MiGetPtePointer((uintptr_t)vaToFree);
345 MiUnmapPte(pte);
347 }
348 return NULL;
349 }
350
351 // Success! Initialize the block and return the pointer to caller.
352 PPOOL_HEADER newHeader = (PPOOL_HEADER)pageVa;
353 newHeader->PoolCanary = 'BEKA';
354 newHeader->PoolTag = Tag;
355 newHeader->Metadata.BlockSize = neededPages * VirtualPageSize; // Store allocated size.
356 newHeader->Metadata.PoolIndex = POOL_TYPE_GLOBAL;
357
358 void* UserAddress = (void*)((uint8_t*)newHeader + sizeof(POOL_HEADER));
359 // Zero the buffer so the caller never sees stale data.
360 kmemset(UserAddress, 0, NumberOfBytes);
361 // Return the pointer (exclude metadata start).
362 return UserAddress;
363}
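/*
 * Size rounding, as a hedged example (again assuming a 16-byte POOL_HEADER
 * and that BYTES_TO_PAGES rounds up): a request for 10000 bytes gives
 * RequiredSize = 10016, neededPages = 3, and the stored
 * Metadata.BlockSize = 3 * 4096 = 12288. When the block is later freed into
 * the global pool, it can satisfy any request up to 12288 - 16 bytes.
 */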
364
365static
366void*
367MiAllocatePagedPool(
368 IN size_t NumberOfBytes,
369 IN uint32_t Tag
370)
371
372/*++
373
374 Routine description:
375
376 Allocates a PagedPool block.
377
378 Arguments:
379
380 [IN] size_t NumberOfBytes - Number of bytes needed to allocate.
381 [IN] uint32_t Tag - A 4-byte integer identifying the allocation; the byte order is reversed in memory (e.g. 'TSET' shows up as "TEST").
382
383 Return Values:
384
385 Pointer to allocated region, or NULL on failure.
386
387 Notes:
388
389 This function AND any access to its pool contents MUST run at IRQL < DISPATCH_LEVEL.
390
391--*/
392
393{
394 size_t ActualSize = NumberOfBytes + sizeof(POOL_HEADER);
395 uintptr_t PagedVa = MiAllocatePoolVa(PagedPool, ActualSize);
396 if (!PagedVa) return NULL;
397
398 PPOOL_HEADER header = (PPOOL_HEADER)PagedVa;
399 // The first page is no longer guaranteed to be resident; it is paged in on demand like the rest, which means the pool free routines must also run at IRQL < DISPATCH_LEVEL.
400 // Set each page to be a demand lazy allocation.
401 size_t NumberOfPages = BYTES_TO_PAGES(ActualSize);
402 size_t currVa = PagedVa;
403 for (size_t i = 0; i < NumberOfPages; i++) {
404 PMMPTE tmpPte = MiGetPtePointer(currVa);
405 if (!tmpPte) continue; // TODO Unroll.
406 MMPTE TempPte = *tmpPte;
407 // Set the PTE as demand zero.
409 // Atomically exchange new value.
410 MiAtomicExchangePte(tmpPte, TempPte.Value);
411 // Invalidate the VA.
412 MiInvalidateTlbForVa((void*)currVa);
413 currVa += VirtualPageSize;
414 }
415
416 // Set metadata. (header should get paged in now).
417 header->PoolCanary = 'BEKA';
418 header->PoolTag = Tag;
419 header->Metadata.BlockSize = ActualSize;
420 header->Metadata.PoolIndex = POOL_TYPE_PAGED;
421
422 // Return VA.
423 return (void*)((uint8_t*)PagedVa + sizeof(POOL_HEADER));
424}
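/*
 * Demand-zero lifecycle, as an illustrative example: a PagedPool request of
 * 5000 bytes gives ActualSize = 5000 + sizeof(POOL_HEADER), i.e. two pages.
 * Both PTEs are marked demand-zero above, so no physical pages are consumed
 * yet; writing the header fields right afterwards faults the first page in,
 * while the second page stays non-resident until the caller touches it.
 * This is why both this routine and MmFreePool()'s paged path must run at
 * IRQL < DISPATCH_LEVEL.
 */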
425
426void*
427MmAllocatePoolWithTag(
428 IN enum _POOL_TYPE PoolType,
429 IN size_t NumberOfBytes,
430 IN uint32_t Tag
431)
432
433/*++
434
435 Routine description:
436
437 Allocates a pool block of the specified type and returns a pointer to allocated block.
438 On any allocation, the returned block is zeroed, no exceptions.
439
440 Arguments:
441
442 [IN] enum _POOL_TYPE PoolType - POOL_TYPE enumerator specifying the type of pool to allocate.
443 [IN] size_t NumberOfBytes - Number of bytes needed to allocate.
444 [IN] uint32_t Tag - A 4-byte integer identifying the allocation; the byte order is reversed in memory (e.g. 'TSET' shows up as "TEST").
445
446 Return Values:
447
448 Pointer to allocated region, or NULL on failure.
449
450 Notes:
451
452 PagedPool allocations CANNOT happen at IRQL >= DISPATCH_LEVEL.
453 NonPagedPool allocations CANNOT happen at IRQL > DISPATCH_LEVEL.
454
455--*/
456
457{
458 // Declarations
459 IRQL oldIrql;
460 size_t ActualSize;
461 PPROCESSOR cpu;
462 PPOOL_DESCRIPTOR Desc;
463 PPOOL_HEADER header;
464 PSINGLE_LINKED_LIST list;
465 size_t Index;
466
467 // Runtime assertions
468 // assert((NumberOfBytes) != 0); Better to use the if statement below, which supplies the caller's return address.
469 assert((Tag) != 0);
470
471 if (NumberOfBytes == 0) {
472 // Bad pool caller.
473 MeBugCheckEx(
474 BAD_POOL_CALLER,
475 RETADDR(0),
476 NULL,
477 NULL,
478 NULL
479 );
480 }
481
482 IRQL currIrql = MeGetCurrentIrql();
483
484 // IRQL must be less than or equal to DISPATCH_LEVEL when allocating from NonPagedPool.
485 // IRQL must be strictly less than DISPATCH_LEVEL when allocating from PagedPool.
486 if (currIrql <= DISPATCH_LEVEL) {
487 if (PoolType == PagedPool && currIrql == DISPATCH_LEVEL) {
490 (void*)&MmAllocatePoolWithTag,
491 (void*)MeGetCurrentIrql(),
492 (void*)8,
493 (void*)__builtin_return_address(0)
494 );
495 }
496 }
497 // IRQL Must NOT be greater than DISPATCH_LEVEL at any allocation.
498 else {
501 (void*)&MmAllocatePoolWithTag,
502 (void*)MeGetCurrentIrql(),
503 (void*)8,
504 (void*)__builtin_return_address(0)
505 );
506 }
507
508 if (PoolType == PagedPool) {
509 // Use the internal paged pool allocator.
510 return MiAllocatePagedPool(NumberOfBytes, Tag);
511 }
512
513 ActualSize = NumberOfBytes + sizeof(POOL_HEADER);
514 cpu = MeGetCurrentProcessor();
515
516
517 // It's NonPagedPool. Find the correct slab.
518 Desc = NULL;
519 for (int i = 0; i < MAX_POOL_DESCRIPTORS; i++) {
520 PPOOL_DESCRIPTOR currentSlab = &cpu->LookasidePools[i];
521 if (ActualSize <= currentSlab->BlockSize) {
522 Desc = currentSlab;
523 Index = i;
524 break; // Found the best-fit slab
525 }
526 }
527
528 if (Desc == NULL) {
529 // Allocation is larger than 2048 bytes, use the large pool allocator.
530 return MiAllocateLargePool(NumberOfBytes, Tag);
531 }
532
533 MsAcquireSpinlock(&Desc->PoolLock, &oldIrql);
534 assert((Desc->FreeCount) != UINT64_T_MAX);
535
536 if (Desc->FreeCount == 0) {
537 // The lookaside pool is empty; refill it.
538 // First, release the spinlock.
539 MsReleaseSpinlock(&Desc->PoolLock, oldIrql);
540 if (!MiRefillPool(Desc, Index)) {
541 // The refill failed; propagate the failure to the caller.
542 return NULL;
543 }
544 // Retry allocation.
545 return MmAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
546 }
547
548 // We have a free block to return.
549 // Pop it from the free list (we hold the spinlock, so an interlocked pop is not needed).
550 list = Desc->FreeListHead.Next;
551 assert((list) != NULL, "Pool is nullptr even though freecount isn't zero.");
552 Desc->FreeListHead.Next = list->Next; // Finish the pop
553 header = CONTAINING_RECORD(list, POOL_HEADER, Metadata.FreeListEntry);
554
555 // We must restore the metadata because the linked list pointer
556 // overwrote it while the block was sitting in the free list.
557 header->Metadata.PoolIndex = (uint16_t)Index;
558 header->Metadata.BlockSize = (uint16_t)Desc->BlockSize;
559
560 // First check if the canary is wrong.
561 if (header->PoolCanary != 'BEKA') {
562 MeBugCheckEx(
563 MEMORY_CORRUPT_HEADER,
564 (void*)header,
565 (void*)__read_rip(),
566 NULL,
567 NULL
568 );
569 }
570
571 // Rewrite its tag.
572 header->PoolTag = Tag;
573 // Decrement descriptor free count.
574 Desc->FreeCount--;
575 assert((Desc->FreeCount) != SIZE_T_MAX); // Check for underflow.
576 // Release spinlock.
577 MsReleaseSpinlock(&Desc->PoolLock, oldIrql);
578 void* UserAddress = (void*)((uint8_t*)header + sizeof(POOL_HEADER));
579 // Set to zero (to avoid kernel issues)
580 kmemset(UserAddress, 0, NumberOfBytes);
581 // Return the pointer (exclude metadata start).
582 return UserAddress;
583}
584
585void
586MmFreePool(
587 IN void* buf
588)
589
590/*++
591
592 Routine description:
593
594 Deallocates the buffer allocated by the MmAllocatePoolWithTag routine (or other pool allocation routines if present).
595
596 Arguments:
597
598 [IN] void* buf - The pointer returned by the allocation routine (start of the allocated region).
599
600 Return Values:
601
602 None.
603
604 Notes:
605
606 Memory is freed here, do not use the pointer after freeing the pool.
607 The pool tag is not modified when freeing, which is useful for debugging.
608
609 Memory allocated with PagedPoolXxX type must be deallocated at IRQL < DISPATCH_LEVEL.
610
611--*/
612
613{
614 if (!buf) return;
616
617 // Convert the buffer to the header.
618 PPOOL_HEADER header = (PPOOL_HEADER)((uint8_t*)buf - sizeof(POOL_HEADER));
619
620 gop_printf(COLOR_YELLOW, "MmFreePool called with IRQL: %d | Header: %p\n", MeGetCurrentIrql(), header);
621
622 if (header->PoolCanary != 'BEKA') {
623 MeBugCheckEx(
624 MEMORY_CORRUPT_HEADER,
625 (void*)header,
626 (void*)RETADDR(0),
627 NULL,
628 NULL
629 );
630 }
631
632 // Obtain the pool index to free the region back into.
633 uint16_t PoolIndex = header->Metadata.PoolIndex;
634
635 if (PoolIndex == POOL_TYPE_GLOBAL) {
636 // Big pool allocation, return it to the global pool.
637 IRQL oldIrql;
638 MsAcquireSpinlock(&GlobalPool.PoolLock, &oldIrql);
639
640 // Push the block back onto the global free list
641 header->Metadata.FreeListEntry.Next = GlobalPool.FreeListHead.Next;
642 GlobalPool.FreeListHead.Next = &header->Metadata.FreeListEntry;
643 GlobalPool.FreeCount++;
644
645 MsReleaseSpinlock(&GlobalPool.PoolLock, oldIrql);
646 return;
647 }
648
649 if (PoolIndex == POOL_TYPE_PAGED) {
650 assert(MeGetCurrentIrql() < DISPATCH_LEVEL); // Arguably redundant: reading the paged header above at a higher IRQL would already have raised IRQL_NOT_LESS_OR_EQUAL.
651 // For a paged pool allocation, we simply free every PTE and then return the consumed VA space.
652 // The BlockSize field in a PagedPool allocation is how many bytes were requested + sizeof(POOL_HEADER)
653 size_t NumberOfPages = BYTES_TO_PAGES(header->Metadata.BlockSize);
654
655 // Loop over the pages; if a PTE is present, unmap it and clear the demand-zero state.
656 uintptr_t CurrentVA = (uintptr_t)header;
657
658 for (size_t i = 0; i < NumberOfPages; i++) {
659 PMMPTE pte = MiGetPtePointer(CurrentVA);
660 if (!pte) goto advance;
661 assert(MM_IS_DEMAND_ZERO_PTE(*pte) == true);
662
663 // If the PTE is present, the demand-zero page has been consumed: deallocate it and clear the demand-zero state.
664 if (pte->Hard.Present) {
665 // It has a PFN.
667
668 // Unmap PTE, free PFN.
669 MiUnmapPte(pte);
671 }
672
673 // Flip the demand zero bit.
674 MMPTE TempPte = *pte;
676 MiAtomicExchangePte(pte, TempPte.Value);
677 // Invalidate the VA.
678 MiInvalidateTlbForVa((void*)CurrentVA);
679
680 advance:
681 CurrentVA += VirtualPageSize;
682 }
683 return;
684 }
685
686 //
687 // Nonpaged pool allocation
688 //
689
690 PPROCESSOR cpu = MeGetCurrentProcessor();
691 PPOOL_DESCRIPTOR Desc = &cpu->LookasidePools[PoolIndex];
692
693 // Acquire the same lock used by the allocator
694 IRQL oldIrql;
695 MsAcquireSpinlock(&Desc->PoolLock, &oldIrql);
696
697 // Push the entry back onto the list (it's protected by the lock)
698 header->Metadata.FreeListEntry.Next = Desc->FreeListHead.Next;
699 Desc->FreeListHead.Next = &header->Metadata.FreeListEntry;
700
701 // Increment the free count.
702 Desc->FreeCount++;
703
704 // Release the lock
705 MsReleaseSpinlock(&Desc->PoolLock, oldIrql);
706}
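/*
 * Usage sketch (illustrative only; 'KOOL' is a hypothetical tag and the error
 * handling is a placeholder): a typical caller pairs MmAllocatePoolWithTag()
 * with MmFreePool() and checks for NULL, since both pool types can fail.
 *
 *     void* Buffer = MmAllocatePoolWithTag(NonPagedPool, 128, 'KOOL');
 *     if (Buffer == NULL) {
 *         // Handle allocation failure.
 *     } else {
 *         // The 128 bytes are already zeroed by the allocator.
 *         MmFreePool(Buffer);
 *     }
 */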