kernel
Loading...
Searching...
No Matches
rundown.c
Go to the documentation of this file.
1#include "../../includes/me.h"
3
// Bit 63 of RUNDOWN_REF.Count: set once teardown has begun. Acquisitions
// that observe this bit must fail.
#define TEARDOWN_ACTIVE (1ULL << 63)
// Bits 0-62 of RUNDOWN_REF.Count: mask extracting the outstanding reference
// count. ULL suffix keeps the constant unsigned 64-bit on every platform.
#define REFERENCE_COUNT (0x7FFFFFFFFFFFFFFFULL)
6
7bool
9 IN PRUNDOWN_REF rundown
10)
11
12/*++
13
14 Routine description : Safely acquires rundown protection on a shared resource to prevent it from being deleted or "rundown" while in use.
15
16 Arguments:
17
18 Pointer to RUNDOWN_REF Object.
19
20 Return Values:
21
22 True - The rundown protection acquisition has succeeded, the object is safe from memory deletion.
23 False - Teardown has started on the object, handle gracefully.
24
25--*/
26
27{
28 uint64_t expected, desired;
29 do {
30 expected = __atomic_load_n(&rundown->Count, __ATOMIC_SEQ_CST);
31
32 // If teardown has started we refuse.
33 if (expected & TEARDOWN_ACTIVE) return false;
34
35 desired = expected + 1;
36 } while (!InterlockedCompareExchangeU64_bool(&rundown->Count, desired, &expected));
37 return true;
38}
39
40void
42 IN PRUNDOWN_REF rundown
43)
44
45/*++
46
47 Routine description : Releases rundown protection from the object.
48
49 Arguments:
50
51 Pointer to RUNDOWN_REF Object.
52
53 Return Values:
54
55 None.
56
57--*/
58
59{
60 InterlockedDecrementU64(&rundown->Count);
61}
62
63// Wait for rundown (teardown)
65 IN PRUNDOWN_REF rundown
66)
67
68/*++
69
70 Routine description : Waits until all rundown protections have been released from the object, then starts Teardown.
71 Use this when you want to gurantee an object will not be used after free.
72
73 Arguments:
74
75 Pointer to RUNDOWN_REF Object.
76
77 Return Values:
78
79 None.
80
81--*/
82
83{
84 uint64_t expected = __atomic_load_n(&rundown->Count, __ATOMIC_SEQ_CST);
85 for (;;) {
86 uint64_t desired = expected | TEARDOWN_ACTIVE;
87
88 // try to set TEARDOWN_ACTIVE while preserving the refcount bits
89 if (InterlockedCompareExchangeU64_bool(&rundown->Count, desired, &expected)) {
90 // success — we hold the TEARDOWN_ACTIVE marker now
91 break;
92 }
93
94 if (expected & TEARDOWN_ACTIVE) {
95 // Another thread set teardown already
96 break;
97 }
98 // otherwise loop and try again with updated expected
99 }
100
101 // Spin until no references remain
102 while ((__atomic_load_n(&rundown->Count, __ATOMIC_SEQ_CST) & REFERENCE_COUNT) != 0) {
103 __pause();
104 }
105}
#define IN
Definition annotations.h:8
FORCEINLINE uint64_t InterlockedDecrementU64(volatile uint64_t *target)
Definition atomic.h:128
FORCEINLINE bool InterlockedCompareExchangeU64_bool(volatile uint64_t *target, uint64_t value, uint64_t *expected)
Definition atomic.h:92
FORCEINLINE void __pause(void)
Definition intrin.h:239
struct _RUNDOWN_REF * PRUNDOWN_REF
#define REFERENCE_COUNT
Definition rundown.c:5
#define TEARDOWN_ACTIVE
Definition rundown.c:4
bool MsAcquireRundownProtection(IN PRUNDOWN_REF rundown)
Definition rundown.c:8
void MsReleaseRundownProtection(IN PRUNDOWN_REF rundown)
Definition rundown.c:41
void MsWaitForRundownProtectionRelease(IN PRUNDOWN_REF rundown)
Definition rundown.c:64