kernel
Loading...
Searching...
No Matches
pool.c
Go to the documentation of this file.
1/*++
2
3Module Name:
4
5 pool.c
6
7Purpose:
8
9 This translation unit contains the implementation of pool allocations in the kernel.
10
11Author:
12
13 slep (Matanel) 2025.
14
15Revision History:
16
17--*/
18
19#include "../../includes/mm.h"
20#include "../../includes/me.h"
21#include "../../includes/mg.h"
22#include "../../assert.h"
23
24// Can hold any size. (NonPaged)
// NOTE(review): internal line 25 — the global POOL_DESCRIPTOR "GlobalPool"
// referenced by MiRefillPool's commented-out fast path — is missing from this
// extract (the trailing cross-reference index places it at pool.c:25).
26
// Sentinel values stored in POOL_HEADER.Metadata.PoolIndex to mark allocations
// that did NOT come from a per-CPU lookaside slab. MmFreePool dispatches on
// these: POOL_TYPE_GLOBAL = large/multi-page NonPaged allocation,
// POOL_TYPE_PAGED = demand-zero paged allocation. Any other value is taken as
// a lookaside slab index.
27#define POOL_TYPE_GLOBAL 9999
28#define POOL_TYPE_PAGED 1234
29
// Published bounds of the NonPaged and Paged pool VA ranges. All start at 0;
// presumably assigned during MiInitializePoolSystem (the assigning lines,
// internal 90-93, are not visible in this extract) — TODO confirm.
30uintptr_t MmNonPagedPoolStart = 0;
31uintptr_t MmNonPagedPoolEnd = 0;
32uintptr_t MmPagedPoolStart = 0;
33uintptr_t MmPagedPoolEnd = 0;
34
// NOTE(review): internal lines 35-36 are missing from this extract; per the
// cross-reference index they carry "MTSTATUS MiInitializePoolSystem(" (pool.c:36).
// Only the (void) parameter list and closing paren are visible below.
37 void
38)
39
40/*++
41
42 Routine description:
43
44 Initializes the Pool Allocation System of the kernel.
45
46 Arguments:
47
48 None.
49
50 Return Values:
51
52 MTSTATUS Status Code.
53
54--*/
55
56{
// NOTE(review): internal line 57 is missing — given the use of `cpu` below it
// presumably declares `PPROCESSOR cpu = MeGetCurrentProcessor();` — TODO confirm.
// Also note this initializes only the CURRENT processor's lookaside pools.
58 if (!cpu) return MT_NOT_FOUND;
59 size_t base = 32; // Start size
60
61 // Initialize normal NonPagedPool.
62 for (int i = 0; i < MAX_POOL_DESCRIPTORS; i++) {
63 PPOOL_DESCRIPTOR desc = &cpu->LookasidePools[i];
64
65 // Would grow in binary (32,64,128,256... + sizeof(POOL_HEADER)) (max would be 2048)
66 size_t blockSize = (base << i) + sizeof(POOL_HEADER);
67 desc->BlockSize = blockSize;
68 desc->FreeCount = 0;
69 desc->FreeListHead.Next = NULL;
70 desc->TotalBlocks = 0;
71 desc->PoolLock.locked = 0;
72 desc->PoolType = NonPagedPool;
73 }
74
75 // Initialize NonPagedPoolNx (no-execute)
76 base = 32;
77 for (int i = 0; i < MAX_POOL_DESCRIPTORS; i++) {
78 PPOOL_DESCRIPTOR desc = &cpu->LookasidePoolsNx[i];
79
80 size_t blockSize = (base << i) + sizeof(POOL_HEADER);
81 desc->BlockSize = blockSize;
82 desc->FreeCount = 0;
83 desc->FreeListHead.Next = NULL;
84 desc->TotalBlocks = 0;
85 desc->PoolLock.locked = 0;
// NOTE(review): internal line 86 is missing from this extract — by symmetry with
// line 72 it presumably sets `desc->PoolType = NonPagedPoolNx;`. Verify against
// the real source: if it is truly absent, the Nx descriptors keep an
// uninitialized PoolType and MiRefillPool would never apply PAGE_NX.
87 }
88
89 // NPG and NPGNx pools reside in the same VA space.
// NOTE(review): internal lines 90-93 are missing — presumably they publish the
// MmNonPagedPool*/MmPagedPool* range globals from the MI_*_POOL_* constants.
94 return MT_SUCCESS;
95}
96
97static
98bool
99MiRefillPool(
100 PPOOL_DESCRIPTOR Desc,
101 size_t PoolIndex
102)
103
104/*++
105
106 Routine description:
107
108 Refills the specified pool with a block of its size.
109 (Review note: acquires one VirtualPageSize page, maps it non-paged with
110 optional PAGE_NX per Desc->PoolType, then carves it into Desc->BlockSize
111 chunks that are pushed onto Desc's free list under Desc->PoolLock.)
112
113 Arguments:
114
115 [IN] PPOOL_DESCRIPTOR Desc - Pointer to descriptor.
116 [IN] size_T PoolIndex - Index of the slab in the CPU Lookaside buffer.
117
118 Return Values:
119
120 True or False based if allocation and/or refill succeeded.
121
122--*/
123
121{
122 // Before allocating a va or another PFN, lets check the global pool first, see if we have a free 4KiB block.
123 //IRQL oldIrql;
124 uintptr_t PageVa = 0;
125 size_t HeaderBlockSize = 0;
126 //size_t Iterations = 0;
127 /* If I ever return back freeing to global pool, I should check that Desc->BlockSize == VirtualPageSize, else it wont use it.
128 * Since we have memory corruptions for larger block sizes..
129 // Acquire the spinlock for atomicity.
130 MsAcquireSpinlock(&GlobalPool.PoolLock, &oldIrql);
131
132 // Initialize the local list so that we push back to not memory leak blocks from the global pol.
133 SINGLE_LINKED_LIST localList;
134 localList.Next = NULL;
135
136 while (GlobalPool.FreeCount) {
137 // As long as we have a free block in the global pool, we check it.
138 PSINGLE_LINKED_LIST list = GlobalPool.FreeListHead.Next;
139 if (list == NULL) break; // FreeCount was wrong, but that's ok
140 GlobalPool.FreeListHead.Next = list->Next;
141 PPOOL_HEADER header = CONTAINING_RECORD(list, POOL_HEADER, Metadata.FreeListEntry);
142 GlobalPool.FreeCount--;
143
144 if (header->PoolCanary != 'BEKA') {
145 MeBugCheckEx(
146 MEMORY_CORRUPT_HEADER,
147 (void*)header,
148 (void*)__read_rip(),
149 NULL,
150 NULL
151 );
152 }
153
154 if (Desc->BlockSize > header->Metadata.BlockSize) {
155 // If the block gotten from the global pool is smaller than the required refill size, we continue and add this block to the list (to push back later)
156 header->Metadata.FreeListEntry.Next = localList.Next;
157 localList.Next = &header->Metadata.FreeListEntry;
158 Iterations++;
159 continue;
160 }
161
162 // The block is good! The loop that refills the desc wil overwrite this header data. (this is why we dont add sizeof)
163 PageVa = (uintptr_t)header;
164 HeaderBlockSize = header->Metadata.BlockSize;
165 break;
166 }
167
168 // Refill back the pool.
169 while (Iterations--) {
170 PSINGLE_LINKED_LIST entryToPushBack = localList.Next;
171 if (entryToPushBack == NULL) {
172 // Shouldn't happen if iterations is correct, but I always admire checking.
173 break;
174 }
175
176 localList.Next = entryToPushBack->Next;
177 entryToPushBack->Next = GlobalPool.FreeListHead.Next;
178 GlobalPool.FreeListHead.Next = entryToPushBack;
179
180 GlobalPool.FreeCount++;
181 }
182
183 // Release global pool lock.
184 MsReleaseSpinlock(&GlobalPool.PoolLock, oldIrql);
185 */
186 if (!PageVa) {
187 // The global pool is empty... lets allocate.
188 // Allocate a 4KiB virtual address.
// NOTE(review): internal line 189 is missing from this extract — given the
// check just below it presumably reads
// `PageVa = MiAllocatePoolVa(Desc->PoolType, VirtualPageSize);` — TODO confirm.
190 if (!PageVa) return false; // Out of VA Space.
191
192 // Allocate a 4KiB Physical page.
// NOTE(review): internal line 193 is missing — presumably declares `pfn` via
// `MiRequestPhysicalPage(...)`; the next line tests it against PFN_ERROR.
194 if (pfn == PFN_ERROR) {
// NOTE(review): internal line 195 is missing — presumably frees the VA
// obtained above before bailing out, otherwise this path leaks VA space.
196 return false;
197 }
198
199 // Map the page permanently.
200 PMMPTE pte = MiGetPtePointer((uintptr_t)PageVa);
201 if (!pte) {
// NOTE(review): internal lines 202-203 are missing — presumably release the
// physical page and the VA before returning false.
204 return false;
205 }
206
207 uint64_t PteFlags = PAGE_PRESENT | PAGE_RW;
208
209 // If the descriptor is a NonPagedPoolNx type, we add the NX bit.
210 if (Desc->PoolType == NonPagedPoolNx) {
211 PteFlags |= PAGE_NX;
212 }
213
214 // Get the PFN Physical address.
215 uint64_t phys = PPFN_TO_PHYSICAL_ADDRESS(INDEX_TO_PPFN(pfn));
216
217 MI_WRITE_PTE(pte, PageVa, phys, PteFlags);
218
219 // Set block size.
220 HeaderBlockSize = VirtualPageSize;
221 }
222
223 // Reaching here means we got a valid page (either from the global pool, or allocation), now we must carve it up to the appropriate slab's size.
224 // Acquire the spinlock for the descriptor given before modifying its data.
225 IRQL descIrql;
226 MsAcquireSpinlock(&Desc->PoolLock, &descIrql);
227
228 // Loop from the start to the end of the page, stepping by the small block size.
// Any tail remainder (HeaderBlockSize % Desc->BlockSize) is deliberately left
// unused — the page is owned by this slab for its lifetime.
229 for (size_t offset = 0; (offset + Desc->BlockSize) <= HeaderBlockSize; offset += Desc->BlockSize) {
230 // newBlock points to the start of this Desc->BlockSize chunk.
231 PPOOL_HEADER newBlock = (PPOOL_HEADER)((uint8_t*)PageVa + offset);
232
233 // Set its header metadata.
234 newBlock->Metadata.BlockSize = Desc->BlockSize;
235 newBlock->Metadata.PoolIndex = PoolIndex;
236 newBlock->PoolCanary = 'BEKA'; // Pool Canary
237 newBlock->PoolTag = 'ADIR'; // Default Tag
238
239 // Add this block to the list of the descriptor.
// Note: Metadata is a union — linking FreeListEntry here overwrites the
// BlockSize/PoolIndex just written; MmAllocatePoolWithTag restores them on pop.
240 newBlock->Metadata.FreeListEntry.Next = Desc->FreeListHead.Next;
241 Desc->FreeListHead.Next = &newBlock->Metadata.FreeListEntry;
242 Desc->TotalBlocks++;
243 Desc->FreeCount++;
244 }
245
246 MsReleaseSpinlock(&Desc->PoolLock, descIrql);
247 return true;
248}
249
250static
251void*
252MiAllocateLargePool(
253 enum _POOL_TYPE PoolType,
254 size_t NumberOfBytes,
255 uint32_t Tag
256)
257
258/*++
259
260 Routine description:
261
262 Allocates a PoolType pool, and returns a pointer to start of region.
263 (Multi-page allocator used when the request exceeds the largest lookaside
264 slab: allocates contiguous VA, backs each page with a physical frame, maps
265 it, and zeroes the user region. Unrolls PFN/PTE work on mid-loop failure.)
266
267 Arguments:
268
269 [IN] enum _POOL_TYPE PoolType - The type of pool to allocate for. (must be NonPagedXXX variant)
270 [IN] size_t NumberOfBytes - Number of bytes needed to allocate.
271 [IN] uint32_t Tag - A 4 byte integer that signifies the current allocation, in little endian (e.g 'TSET' -> 'TEST')
272
273 Return Values:
274
275 Pointer to start of allocated region
276
277--*/
278
276{
277 // Calculate required size.
278 size_t RequiredSize = NumberOfBytes + sizeof(POOL_HEADER);
279
280 // Convert to pages.
281 size_t neededPages = BYTES_TO_PAGES(RequiredSize);
282
283 // Allocate contiguous VAs.
284 uintptr_t pageVa = MiAllocatePoolVa(NonPagedPool, RequiredSize);
285
286 if (!pageVa) {
287 // We don't have enough VA space to allocate required pool.
288 return NULL;
289 }
290
291 // Now lets loop to request PFNs, we also create a safeguard to unroll the loop if failure happens.
292 bool failure = false;
293 size_t Iterations = 0;
294
295 for (size_t i = 0; i < neededPages; i++) {
296 // Increment by 4KiB each time.
297 uint8_t* currVa = (uint8_t*)pageVa + (i * VirtualPageSize);
298
// NOTE(review): internal line 299 is missing from this extract — presumably
// declares `pfn` via `MiRequestPhysicalPage(...)`; the next line tests it.
300
301 if (pfn == PFN_ERROR) {
302 // Allocation for a physical page failed, free the VA allocated, and unroll the loop (see code below loop)
303 MiFreePoolVaContiguous(pageVa, RequiredSize, NonPagedPool);
304 failure = true;
305 break;
306 }
307
308 // Map the page.
309 PMMPTE pte = MiGetPtePointer((uintptr_t)currVa);
310 uint64_t phys = PPFN_TO_PHYSICAL_ADDRESS(INDEX_TO_PPFN(pfn));
311
312 uint64_t PteFlags = PAGE_PRESENT | PAGE_RW;
313
314 // If its NX pool, we add the NX bit.
315 if (PoolType == NonPagedPoolNx) {
316 PteFlags |= PAGE_NX;
317 }
318
// FIX(review): previously wrote hard-coded PAGE_PRESENT | PAGE_RW here, which
// discarded the PteFlags computed above — NonPagedPoolNx large allocations were
// silently mapped EXECUTABLE. Pass PteFlags so the PAGE_NX bit takes effect.
319 MI_WRITE_PTE(pte, currVa, phys, PteFlags);
320
321 // Update PFN metadata.
322 PPFN_ENTRY ppfn = INDEX_TO_PPFN(pfn);
323 ppfn->State = PfnStateActive;
324 ppfn->Flags = PFN_FLAG_NONPAGED;
325 ppfn->Descriptor.Mapping.PteAddress = pte;
326
327 // Update iterations for loop unroll (if failure)
328 Iterations++;
329 }
330
331 // If failure, unroll PFNs using Iterations.
332 if (failure) {
333 for (size_t j = 0; j < Iterations; j++) {
334 uint8_t* vaToFree = (uint8_t*)pageVa + (j * VirtualPageSize);
335 PMMPTE pte = MiGetPtePointer((uintptr_t)vaToFree);
// NOTE(review): internal lines 336 and 338 are missing — presumably they
// translate the PTE back to its PFN (MiTranslatePteToPfn) and release it
// (MiReleasePhysicalPage) around the unmap below — TODO confirm.
337 MiUnmapPte(pte);
339 }
340 return NULL;
341 }
342
343 // Success! Initialize the block and return the pointer to caller.
344 PPOOL_HEADER newHeader = (PPOOL_HEADER)pageVa;
345 newHeader->PoolCanary = 'BEKA';
346 newHeader->PoolTag = Tag;
// NOTE(review): Metadata.BlockSize is declared uint16_t (mm.h:546), so
// neededPages * VirtualPageSize truncates for allocations >= 64 KiB, and
// MmFreePool would then free too few pages. Widening the field (or storing the
// page count) is an interface change outside this block — flagging only.
347 newHeader->Metadata.BlockSize = neededPages * VirtualPageSize; // Store allocated size.
// NOTE(review): internal line 348 is missing — presumably
// `newHeader->Metadata.PoolIndex = POOL_TYPE_GLOBAL;`, which MmFreePool relies
// on to route this block to the multi-page free path — TODO confirm.
349
350 void* UserAddress = (void*)((uint8_t*)newHeader + sizeof(POOL_HEADER));
351 // Set to zero (to avoid kernel issues)
352 kmemset(UserAddress, 0, NumberOfBytes);
353 // Return the pointer (exclude metadata start).
354 return UserAddress;
355}
356
357static
358void*
359MiAllocatePagedPool(
360 IN size_t NumberOfBytes,
361 IN uint32_t Tag
362)
363
364/*++
365
366 Routine description:
367
368 Allocates a paged pool.
369 (Review note: reserves contiguous paged-pool VA and marks every page
370 demand-zero; no physical frame is committed until first touch. The header
371 write at the end is the first touch of page 0.)
372
373 Arguments:
374
375 [IN] size_t NumberOfBytes - Number of bytes needed to allocate.
376 [IN] uint32_t Tag - A 4 byte integer that signifies the current allocation, in big endian (e.g 'TSET' - "TEST")
377
378 Return Values:
379
380 Pointer to allocated region, or NULL on failure.
381
382 Notes:
383
384 This function AND access to it's pool contents MUST be with IRQL < DISPATCH_LEVEL.
385
386--*/
387
385{
386 size_t ActualSize = NumberOfBytes + sizeof(POOL_HEADER);
387 uintptr_t PagedVa = MiAllocatePoolVa(PagedPool, ActualSize);
388 if (!PagedVa) return NULL;
389
390 PPOOL_HEADER header = (PPOOL_HEADER)PagedVa;
391 // The first page will not be resident in memory anymore, it should be paged in as well, that means that pool free routines will need to be executed at < DISPATCH_LEVEL too.
392 // Set each page to be a demand lazy allocation.
393 size_t NumberOfPages = BYTES_TO_PAGES(ActualSize);
394 size_t currVa = PagedVa;
395
396 size_t i = 0;
397 bool failure = false;
398 for (; i < NumberOfPages; i++) {
399 PMMPTE tmpPte = MiGetPtePointer(currVa);
400 if (!tmpPte) { failure = true; break; }
401 MMPTE TempPte = *tmpPte;
402 // Set the PTE as demand zero. (paged pool is also NoExecute)
// NOTE(review): internal line 403 is missing from this extract — presumably
// `MM_SET_DEMAND_ZERO_PTE(TempPte, PROT_KERNEL_WRITE, ...)` (mm.h:141),
// mutating the local TempPte exchanged below — TODO confirm.
404 // Atomically exchange new value.
405 MiAtomicExchangePte(tmpPte, TempPte.Value);
406 // Invalidate the VA.
407 MiInvalidateTlbForVa((void*)currVa);
408 currVa += VirtualPageSize;
409 }
410
411 // Check if we failed getting a PTE, if so, unroll loop.
412 if (failure) {
413 while (i-- > 0) {
414 // Go back to the page we processed before.
415 currVa -= VirtualPageSize;
416
417 // Acquire its PTE.
418 PMMPTE Pte = MiGetPtePointer(currVa);
419 if (!Pte) {
420 assert(false, "Acquiring PTE pointer after it was valid before resulted in NULL, severe bug.");
421 break;
422 }
423
424 // Clear PTE.
425 MiUnmapPte(Pte);
426 }
427
// NOTE(review): this failure path does not appear to release the VA range
// obtained from MiAllocatePoolVa — looks like a VA-space leak; verify against
// the full source (a missing line may handle it).
428 // Return NULL, allocation failure.
429 return NULL;
430 }
431
432 // Set metadata. (header should get paged in now).
433 header->PoolCanary = 'BEKA';
434 header->PoolTag = Tag;
// NOTE(review): Metadata.BlockSize is uint16_t (mm.h:546); ActualSize >= 64 KiB
// truncates here and would corrupt the page count computed in MmFreePool.
435 header->Metadata.BlockSize = ActualSize;
// NOTE(review): internal line 436 is missing — presumably
// `header->Metadata.PoolIndex = POOL_TYPE_PAGED;`, which MmFreePool relies on
// to route this block to the paged free path — TODO confirm.
437
438 // Return VA.
439 return (void*)((uint8_t*)PagedVa + sizeof(POOL_HEADER));
440}
441
442void*
// NOTE(review): internal line 443 is missing from this extract — per the
// cross-reference index it carries "MmAllocatePoolWithTag(" (pool.c:443).
444 IN enum _POOL_TYPE PoolType,
445 IN size_t NumberOfBytes,
446 IN uint32_t Tag
447)
448
449/*++
450
451 Routine description:
452
453 Allocates a pool block of the specified type and returns a pointer to allocated block.
454 On any allocation, the returned block(s) is/are zeroed, no exceptions.
455
456 Arguments:
457
458 [IN] enum _POOL_TYPE - POOL_TYPE Enumerator, specifying the type of pool that will be allocated.
459 [IN] size_t NumberOfBytes - Number of bytes needed to allocate.
460 [IN] uint32_t Tag - A 4 byte integer that signifies the current allocation, in big endian (e.g 'TSET' - "TEST")
461
462 Return Values:
463
464 Pointer to allocated region, or NULL on failure.
465
466 Notes:
467
468 PagedPool allocations CANNOT happen at IRQL => DISPATCH_LEVEL.
469 NonPagedPool allocations CANNOT happen at IRQL > DISPATCH_LEVEL.
470
471 NonPagedPoolCachedXxX pool allocations are not supported.
472
473 Todos:
474
475 Implement POOL_TAGGING (global doubly linked list of all pool allocs, so we can have a poolmon of our own!)
476
477--*/
478
479{
480 // Declarations
481 IRQL oldIrql;
482 size_t ActualSize;
483 PPROCESSOR cpu;
484 PPOOL_DESCRIPTOR Desc;
485 PPOOL_HEADER header;
// NOTE(review): internal line 486 is missing — presumably declares
// `PSINGLE_LINKED_LIST list;`, assigned at internal line 588 below.
487 size_t Index;
488
489 // Runtime assertions
490 //assert((NumberOfBytes) != 0); Better to use the if statement, supplies retaddr.
491 assert((Tag) != 0);
492
493 if (NumberOfBytes == 0) {
494 // Bad pool caller.
// NOTE(review): internal lines 495-496 are missing — presumably
// `MeBugCheckEx(BAD_POOL_CALLER,` plus its first argument; only the trailing
// arguments of the call are visible below.
497 RETADDR(0),
498 NULL,
499 NULL,
500 NULL
501 );
502 }
503
504 IRQL currIrql = MeGetCurrentIrql();
505
506 // IRQL Must be less or equal to DISPATCH_LEVEL if allocating with NonPagedPool.
507 // IRQL Must be LESS than DISPATCH_LEVEL if allocating with PagedPool.
508 if (currIrql <= DISPATCH_LEVEL) {
509 if (PoolType == PagedPool && currIrql == DISPATCH_LEVEL) {
// NOTE(review): internal lines 510-511 are missing — presumably the
// `MeBugCheckEx(` head (IRQL-violation bugcheck code) of the call whose
// arguments follow.
512 (void*)&MmAllocatePoolWithTag,
513 (void*)MeGetCurrentIrql(),
514 (void*)8,
515 (void*)__builtin_return_address(0)
516 );
517 }
518 }
519 // IRQL Must NOT be greater than DISPATCH_LEVEL at any allocation.
520 else {
// NOTE(review): internal lines 521-522 are missing — same pattern: the
// `MeBugCheckEx(` head of this IRQL-violation bugcheck.
523 (void*)&MmAllocatePoolWithTag,
524 (void*)MeGetCurrentIrql(),
525 (void*)8,
526 (void*)__builtin_return_address(0)
527 );
528 }
529
530 if (PoolType == PagedPool) {
531 // Use the internal paged pool allocator.
532 return MiAllocatePagedPool(NumberOfBytes, Tag);
533 }
534
535 ActualSize = NumberOfBytes + sizeof(POOL_HEADER);
536 cpu = MeGetCurrentProcessor();
537
538
539 // It's NonPagedPool OR NonPagedPooLNx. Find the correct slab.
540 PPOOL_DESCRIPTOR TypeDescriptor = NULL;
541
542 if (PoolType == NonPagedPool) {
543 // Normal
544 TypeDescriptor = cpu->LookasidePools;
545 }
546 else if (PoolType == NonPagedPoolNx) {
547 // Nx
548 TypeDescriptor = cpu->LookasidePoolsNx;
549 }
550 else {
551 // Pool type is not supported.
552 return NULL;
553 }
554
555 Desc = NULL;
// Slabs are sorted ascending by BlockSize, so the first fit is the best fit.
556 for (int i = 0; i < MAX_POOL_DESCRIPTORS; i++) {
557 PPOOL_DESCRIPTOR currentSlab = &TypeDescriptor[i];
558 if (ActualSize <= currentSlab->BlockSize) {
559 Desc = currentSlab;
560 Index = i;
561 break; // Found the best-fit slab
562 }
563 }
564
565 if (Desc == NULL) {
566 // Allocation is larger than 2048 bytes, use the large pool allocator.
567 return MiAllocateLargePool(PoolType, NumberOfBytes, Tag);
568 }
569
570 MsAcquireSpinlock(&Desc->PoolLock, &oldIrql);
571 assert((Desc->FreeCount) != UINT64_T_MAX);
572
573 if (Desc->FreeCount == 0) {
574 // Looks like the pool is empty, refill all empty pools.
575 // First, release the spinlock.
576 MsReleaseSpinlock(&Desc->PoolLock, oldIrql);
577 if (!MiRefillPool(Desc, Index)) {
578 // If we failed allocation, act on failure.
579 return NULL;
580 }
581
// NOTE(review): between the release above and this retry another thread can
// drain the freshly refilled slab; the recursion handles that by refilling
// again, but under sustained pressure this can recurse repeatedly.
582 // Retry allocation.
583 return MmAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
584 }
585
586 // Looks like we have a block to return! Return its PTR.
587 // First, acquire it. (we are under spinlock, no need for interlocked pop)
588 list = Desc->FreeListHead.Next;
589 assert((list) != NULL, "Pool is nullptr even though freecount isn't zero.");
590 Desc->FreeListHead.Next = list->Next; // Finish the pop
591 header = CONTAINING_RECORD(list, POOL_HEADER, Metadata.FreeListEntry);
592
593 // We must restore the metadata because the linked list pointer
594 // overwrote it while the block was sitting in the free list.
595 header->Metadata.PoolIndex = (uint16_t)Index;
596 header->Metadata.BlockSize = (uint16_t)Desc->BlockSize;
597
598 // First check if the canary is wrong.
599 if (header->PoolCanary != 'BEKA') {
// NOTE(review): internal lines 600-601 are missing — presumably
// `MeBugCheckEx(MEMORY_CORRUPT_HEADER,` heading the call whose arguments follow.
602 (void*)header,
603 (void*)__read_rip(),
604 NULL,
605 NULL
606 );
607 }
608
609 // Rewrite its tag.
610 header->PoolTag = Tag;
611 // Decrement descriptor free count.
612 Desc->FreeCount--;
613 assert((Desc->FreeCount) != SIZE_T_MAX); // Check for underflow.
614 // Release spinlock.
615 MsReleaseSpinlock(&Desc->PoolLock, oldIrql);
616 void* UserAddress = (void*)((uint8_t*)header + sizeof(POOL_HEADER));
617
618 // Set to zero (to avoid kernel issues)
619 // If this is ever removed, massive kernel bugs will appear with uninitialized memory.
620 // So we kinda depend on it now.
621 // brilliant engineering, brilliant (its sarcasm)
622 // (NOTE: If pool is allocated freshly by MiRefillPool (needed loop to find out) (or double counters),
623 // it usually (USUALLY, maybe it changed) came from acquiring a physical page with a zeroed pfn state value
624 // so the page comes zeroed, which means the memset below can be skipped) (PERFORMANCE TODO)
625 kmemset(UserAddress, 0, NumberOfBytes);
626
627 // Return the pointer (exclude metadata start).
628 return UserAddress;
629}
630
631void
// NOTE(review): internal line 632 is missing from this extract — per the
// cross-reference index it carries "MmFreePool(" (pool.c:632).
633 IN void* buf
634)
635
636/*++
637
638 Routine description:
639
640 Deallocates the buffer allocated by the MmAllocatePoolWithTag routine (or other pool allocation routines if present).
641
642 Arguments:
643
644 [IN] void* buf - The pointer given by the routine. (start of allocated region)
645
646 Return Values:
647
648 None.
649
650 Notes:
651
652 Memory is freed here, do not use the pointer after freeing the pool.
653 The Pool tag will not be modified when freeing, useful for debugging.
654
655 Memory allocated with PagedPoolXxX type must be deallocated at IRQL < DISPATCH_LEVEL.
656
657--*/
658
659{
660 if (!buf) return;
// NOTE(review): internal line 661 is missing (likely blank or an assertion).
662
663 // Convert the buffer to the header.
664 PPOOL_HEADER header = (PPOOL_HEADER)((uint8_t*)buf - sizeof(POOL_HEADER));
665
// NOTE(review): debug trace left in the free path — every free prints to the
// framebuffer console. Should be removed or demoted to a debug-only build.
666 gop_printf(COLOR_YELLOW, "MmFreePool called with IRQL: %d | Header: %p\n", MeGetCurrentIrql(), header);
667
668 if (header->PoolCanary != 'BEKA') {
// NOTE(review): internal lines 669-670 are missing — presumably
// `MeBugCheckEx(MEMORY_CORRUPT_HEADER,` heading the call whose arguments follow.
671 (void*)header,
672 (void*)RETADDR(0),
673 NULL,
674 NULL
675 );
676 }
677
678 // Obtain the pool index to free the region back into.
679 uint16_t PoolIndex = header->Metadata.PoolIndex;
680
681 if (PoolIndex == POOL_TYPE_GLOBAL) {
682 // We destroy global pool allocations and free them back to main memory.
683 size_t BlockSize = header->Metadata.BlockSize;
684 size_t NumberOfPages = BYTES_TO_PAGES(BlockSize);
685 uintptr_t CurrentVA = (uintptr_t)header;
686
687 // Release physical pages and unmap PTEs.
688 for (size_t i = 0; i < NumberOfPages; i++) {
689 PMMPTE pte = MiGetPtePointer(CurrentVA);
690
691 // Check if PTE is present, it should be though.
692 if (pte && pte->Hard.Present) {
// NOTE(review): internal line 693 is missing — presumably translates the PTE
// to its frame, e.g. `PAGE_INDEX pfn = MiTranslatePteToPfn(pte);` — TODO confirm.
694
695 // Invalidate PTE.
696 MiUnmapPte(pte);
697 // Free the PFN back to db.
// NOTE(review): internal line 698 is missing — presumably
// `MiReleasePhysicalPage(pfn);`.
699 }
700 else {
701 MeBugCheckEx(MEMORY_CORRUPT_HEADER, (void*)CurrentVA, 0, 0, 0);
702 }
703
704 CurrentVA += VirtualPageSize;
705 }
706
707 // Free VA space given.
708 MiFreePoolVaContiguous((uintptr_t)header, BlockSize, NonPagedPool);
709
710 return;
711 }
712
713 if (PoolIndex == POOL_TYPE_PAGED) {
714 // For a paged pool allocation, we just free every PTE, then returned the VA space consumed.
715 // The BlockSize field in a PagedPool allocation is how many bytes were requested + sizeof(POOL_HEADER)
716 size_t NumberOfPages = BYTES_TO_PAGES(header->Metadata.BlockSize);
717
718 // Loop over the amount, if the PTE is present, unmap it and clear the demand page.
719 uintptr_t CurrentVA = (uintptr_t)header;
720
721 for (size_t i = 0; i < NumberOfPages; i++) {
722 PMMPTE pte = MiGetPtePointer(CurrentVA);
723 if (unlikely(!pte)) goto advance;
724
725 // Check if the PTE is present, if it is, the demand zero page has been consumed, we deallocate, and unset the demand zero.
726 if (pte->Hard.Present) {
727 // It has a PFN.
// NOTE(review): internal lines 728 and 731 are missing — presumably the
// MiTranslatePteToPfn / MiReleasePhysicalPage pair around the unmap below.
729 // Unmap PTE, free PFN.
730 MiUnmapPte(pte);
732 }
733 else {
734 // If the PTE isnt present, it still must contain a demand zero bit.
735 assert(MM_IS_DEMAND_ZERO_PTE(*pte) == true);
736 MMPTE TempPte = *pte;
// NOTE(review): internal line 737 is missing — presumably
// `MM_UNSET_DEMAND_ZERO_PTE(TempPte);` (mm.h:147) — TODO confirm.
738 // Flip the demand zero bit.
739 MiAtomicExchangePte(pte, TempPte.Value);
740 }
741
742 // Invalidate the VA. (only necessary for else though, as in MiUnmapPte it does invalidate TLB, but im so scared of bugs ill leave this here)
743 MiInvalidateTlbForVa((void*)CurrentVA);
744
745 advance:
746 CurrentVA += VirtualPageSize;
747 }
// NOTE(review): unlike the POOL_TYPE_GLOBAL path, the paged path never calls
// MiFreePoolVaContiguous, so the paged-pool VA range is never returned —
// looks like a VA-space leak; verify against the full source.
748 return;
749 }
750
751 //
752 // Nonpaged pool allocation
753 //
754
// NOTE(review): internal line 755 is missing — presumably
// `PPROCESSOR cpu = MeGetCurrentProcessor();`. Two review concerns here:
// (1) the header does not record whether the block came from LookasidePools or
// LookasidePoolsNx, so an Nx allocation is pushed back into the EXECUTABLE
// non-Nx slab below — NX-mapped pages can then be handed out as NonPagedPool;
// (2) a block allocated on one CPU may be freed into another CPU's lookaside,
// skewing the per-descriptor counters. Both need fixing at the header level.
756 PPOOL_DESCRIPTOR Desc = &cpu->LookasidePools[PoolIndex];
757
758 // Acquire the same lock used by the allocator
759 IRQL oldIrql;
760 MsAcquireSpinlock(&Desc->PoolLock, &oldIrql);
761
762 // Push the entry back onto the list (it's protected by the lock)
// NOTE(review): internal line 763 is missing — presumably
// `header->Metadata.FreeListEntry.Next = Desc->FreeListHead.Next;` completing
// the push whose second half is visible below.
764 Desc->FreeListHead.Next = &header->Metadata.FreeListEntry;
765
766 // Increment the free count.
767 Desc->FreeCount++;
768
769 // Release the lock
770 MsReleaseSpinlock(&Desc->PoolLock, oldIrql);
771}
#define IN
Definition annotations.h:8
#define assert(...)
Definition assert.h:57
NORETURN void MeBugCheckEx(IN enum _BUGCHECK_CODES BugCheckCode, IN void *BugCheckParameter1, IN void *BugCheckParameter2, IN void *BugCheckParameter3, IN void *BugCheckParameter4)
Definition bugcheck.c:245
@ DISPATCH_LEVEL
Definition core.h:17
PROCESSOR * PPROCESSOR
Definition core.h:48
enum _IRQL IRQL
struct _SINGLE_LINKED_LIST * PSINGLE_LINKED_LIST
void gop_printf(uint32_t color, const char *fmt,...)
Definition gop.c:633
FORCEINLINE uint64_t __read_rip(void)
Definition intrin.h:233
#define CONTAINING_RECORD(ptr, type, member)
Definition macros.h:11
#define RETADDR(level)
Definition macros.h:53
#define unlikely(x)
Definition macros.h:62
#define SIZE_T_MAX
Definition macros.h:17
#define UINT64_T_MAX
Definition macros.h:22
PAGE_INDEX MiTranslatePteToPfn(IN PMMPTE pte)
Definition map.c:310
PMMPTE MiGetPtePointer(IN uintptr_t va)
Definition map.c:76
void MiInvalidateTlbForVa(IN void *VirtualAddress)
Definition map.c:273
void MiUnmapPte(IN PMMPTE pte)
Definition map.c:385
@ BAD_POOL_CALLER
Definition me.h:122
@ MEMORY_CORRUPT_HEADER
Definition me.h:113
FORCEINLINE IRQL MeGetCurrentIrql(void)
Definition me.h:415
FORCEINLINE PPROCESSOR MeGetCurrentProcessor(void)
Definition me.h:369
#define COLOR_YELLOW
Definition mg.h:35
@ PAGE_RW
Definition mm.h:311
@ PAGE_PRESENT
Definition mm.h:307
@ PAGE_NX
Definition mm.h:347
@ PfnStateFree
Definition mm.h:279
@ PfnStateZeroed
Definition mm.h:280
@ PfnStateActive
Definition mm.h:276
FORCEINLINE void MiAtomicExchangePte(PMMPTE PtePtr, uint64_t NewPteValue)
Definition mm.h:762
#define MAX_POOL_DESCRIPTORS
Definition mm.h:174
#define MM_SET_DEMAND_ZERO_PTE(pte, prot_flags, nx)
Definition mm.h:141
_POOL_TYPE
Definition mm.h:354
@ NonPagedPool
Definition mm.h:355
@ PagedPool
Definition mm.h:356
@ NonPagedPoolNx
Definition mm.h:359
#define PROT_KERNEL_WRITE
Definition mm.h:233
struct _MMPTE * PMMPTE
struct _POOL_DESCRIPTOR POOL_DESCRIPTOR
#define MI_PAGED_POOL_BASE
Definition mm.h:215
#define PPFN_TO_PHYSICAL_ADDRESS(PPFN)
Definition mm.h:136
#define PROT_KERNEL_NOEXECUTE
Definition mm.h:234
uint64_t PAGE_INDEX
Definition mm.h:256
FORCEINLINE void * kmemset(void *dest, int64_t val, uint64_t len)
Definition mm.h:655
struct _POOL_DESCRIPTOR * PPOOL_DESCRIPTOR
#define MI_NONPAGED_POOL_BASE
Definition mm.h:212
#define BYTES_TO_PAGES(Bytes)
Definition mm.h:167
#define MI_NONPAGED_POOL_END
Definition mm.h:213
#define PFN_ERROR
Definition mm.h:229
struct _POOL_HEADER POOL_HEADER
#define MI_PAGED_POOL_END
Definition mm.h:216
#define MM_UNSET_DEMAND_ZERO_PTE(pte)
Definition mm.h:147
#define VirtualPageSize
Definition mm.h:53
#define INDEX_TO_PPFN(Index)
Definition mm.h:62
#define MI_WRITE_PTE(_PtePointer, _Va, _Pa, _Flags)
Definition mm.h:90
struct _PFN_ENTRY * PPFN_ENTRY
struct _MMPTE MMPTE
@ PFN_FLAG_NONPAGED
Definition mm.h:288
#define MM_IS_DEMAND_ZERO_PTE(pte)
Definition mm.h:139
struct _POOL_HEADER * PPOOL_HEADER
#define PROT_KERNEL_READ
Definition mm.h:232
#define MT_SUCCESS
Definition mtstatus.h:22
int32_t MTSTATUS
Definition mtstatus.h:12
#define MT_NOT_FOUND
Definition mtstatus.h:30
PAGE_INDEX MiRequestPhysicalPage(IN PFN_STATE ListType)
Definition pfn.c:333
NOINLINE void MiReleasePhysicalPage(IN PAGE_INDEX PfnIndex)
Definition pfn.c:441
uintptr_t MmNonPagedPoolEnd
Definition pool.c:31
uintptr_t MmNonPagedPoolStart
Definition pool.c:30
#define POOL_TYPE_PAGED
Definition pool.c:28
MTSTATUS MiInitializePoolSystem(void)
Definition pool.c:36
#define POOL_TYPE_GLOBAL
Definition pool.c:27
POOL_DESCRIPTOR GlobalPool
Definition pool.c:25
uintptr_t MmPagedPoolStart
Definition pool.c:32
void MmFreePool(IN void *buf)
Definition pool.c:632
uintptr_t MmPagedPoolEnd
Definition pool.c:33
void * MmAllocatePoolWithTag(IN enum _POOL_TYPE PoolType, IN size_t NumberOfBytes, IN uint32_t Tag)
Definition pool.c:443
void MsAcquireSpinlock(IN PSPINLOCK lock, IN PIRQL OldIrql)
Definition spinlock.c:13
void MsReleaseSpinlock(IN PSPINLOCK lock, IN IRQL OldIrql)
Definition spinlock.c:45
struct _MMPTE::@172372265215056352375070220246156106027174106113::@200357034104227323320222006243127050212100105247 Hard
uint64_t Present
Definition mm.h:430
uint64_t Value
Definition mm.h:423
union _PFN_ENTRY::@217024126340164016372152071216274230164113211246 Descriptor
struct _PFN_ENTRY::@217024126340164016372152071216274230164113211246::@301110335271023021153236134322146064331241142124 Mapping
uint8_t State
Definition mm.h:471
PMMPTE PteAddress
Definition mm.h:482
uint8_t Flags
Definition mm.h:472
enum _POOL_TYPE PoolType
Definition mm.h:559
size_t BlockSize
Definition mm.h:555
SPINLOCK PoolLock
Definition mm.h:558
volatile uint64_t FreeCount
Definition mm.h:556
volatile uint64_t TotalBlocks
Definition mm.h:557
SINGLE_LINKED_LIST FreeListHead
Definition mm.h:554
uint16_t BlockSize
Definition mm.h:546
uint16_t PoolIndex
Definition mm.h:547
SINGLE_LINKED_LIST FreeListEntry
Definition mm.h:541
uint32_t PoolCanary
Definition mm.h:537
uint32_t PoolTag
Definition mm.h:550
union _POOL_HEADER::@321115223011072362277073135231015025151337071364 Metadata
POOL_DESCRIPTOR LookasidePoolsNx[MAX_POOL_DESCRIPTORS]
Definition me.h:334
POOL_DESCRIPTOR LookasidePools[MAX_POOL_DESCRIPTORS]
Definition me.h:333
struct _SINGLE_LINKED_LIST * Next
Definition core.h:26
volatile uint32_t locked
Definition ms.h:37
uintptr_t MiAllocatePoolVa(IN POOL_TYPE PoolType, IN size_t NumberOfBytes)
Definition va.c:213
void MiFreePoolVaContiguous(IN uintptr_t va, IN size_t NumberOfBytes, IN POOL_TYPE PoolType)
Definition va.c:367