Commit 8c141e5
fix(tasks): allocation tracker prevent counter overflow (#13085)
Allocation tracker did not deal with the possibility that counters could reach `usize::MAX`. In that case, addition would wrap around back to 0. This problem was pointed out by @aapoalas in #13043 (review).

Introduce an `AtomicCounter` type which uses saturating addition to prevent this happening. Reaching such a high allocation count is highly unlikely on 64-bit systems, but might just about be possible on 32-bit.

Maybe this is overkill, but since `cargo allocs` is just an internal tool, performance is not the highest priority, so on balance it seems better to handle this edge case. It doesn't seem to hurt performance much:

```
# Before
% hyperfine -i --warmup 3 'cargo allocs'
Benchmark 1: cargo allocs
  Time (mean ± σ):     299.9 ms ±   2.4 ms    [User: 132.8 ms, System: 31.0 ms]
  Range (min … max):   297.6 ms … 303.9 ms    10 runs

# After
% hyperfine -i --warmup 3 'cargo allocs'
Benchmark 1: cargo allocs
  Time (mean ± σ):     301.3 ms ±   3.4 ms    [User: 133.3 ms, System: 31.5 ms]
  Range (min … max):   297.8 ms … 310.0 ms    10 runs
```

Note: The same problem in the arena allocation tracker is addressed in #13043.
1 parent: df3829c
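For context, the wrap-around being guarded against is the documented behavior of `AtomicUsize::fetch_add`, which wraps on overflow. A minimal standalone sketch (not part of the commit) contrasting the old wrapping behavior with the saturating `fetch_update` approach the commit adopts:

```rust
use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};

fn main() {
    // `fetch_add` wraps on overflow: a counter sitting at `usize::MAX`
    // silently resets to 0 on the next increment.
    let wrapping = AtomicUsize::new(usize::MAX);
    wrapping.fetch_add(1, SeqCst);
    assert_eq!(wrapping.load(SeqCst), 0);

    // `fetch_update` with `saturating_add` pins the counter at `usize::MAX`
    // instead. The closure always returns `Some`, so the result is never `Err`.
    let saturating = AtomicUsize::new(usize::MAX);
    let _ = saturating.fetch_update(SeqCst, SeqCst, |n| Some(n.saturating_add(1)));
    assert_eq!(saturating.load(SeqCst), usize::MAX);
}
```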

File tree

1 file changed: +38 −9 lines

  • tasks/track_memory_allocations/src/lib.rs

tasks/track_memory_allocations/src/lib.rs

Lines changed: 38 additions & 9 deletions
```diff
@@ -21,17 +21,46 @@ static GLOBAL: TrackedAllocator = TrackedAllocator;
 
 struct TrackedAllocator;
 
+/// Atomic counter.
+///
+/// Mainly just a wrapper around `AtomicUsize`, but the `increment` method ensures that the
+/// counter value doesn't wrap around if the counter reaches `usize::MAX`.
+/// This is practically infeasible on 64-bit systems, but might just be possible on 32-bit.
+///
+/// Note: `SeqCst` ordering may be stronger than required, but performance is not the primary
+/// concern here, so play it safe.
+struct AtomicCounter(AtomicUsize);
+
+impl AtomicCounter {
+    const fn new() -> Self {
+        Self(AtomicUsize::new(0))
+    }
+
+    fn get(&self) -> usize {
+        self.0.load(SeqCst)
+    }
+
+    fn increment(&self) {
+        // Result of `fetch_update` cannot be `Err`, as the closure always returns `Some`
+        let _ = self.0.fetch_update(SeqCst, SeqCst, |count| Some(count.saturating_add(1)));
+    }
+
+    fn reset(&self) {
+        self.0.store(0, SeqCst);
+    }
+}
+
 /// Number of system allocations
 // NOTE: We are only tracking the number of system allocations here, and not the number of bytes that are allocated.
 // The original version of this tool did track the number of bytes, but there was some variance between platforms that
 // made it less reliable as a measurement. In general, the number of allocations is closely correlated with the size of
 // allocations, so just tracking the number of allocations is sufficient for our purposes.
-static NUM_ALLOC: AtomicUsize = AtomicUsize::new(0);
-static NUM_REALLOC: AtomicUsize = AtomicUsize::new(0);
+static NUM_ALLOC: AtomicCounter = AtomicCounter::new();
+static NUM_REALLOC: AtomicCounter = AtomicCounter::new();
 
 fn reset_global_allocs() {
-    NUM_ALLOC.store(0, SeqCst);
-    NUM_REALLOC.store(0, SeqCst);
+    NUM_ALLOC.reset();
+    NUM_REALLOC.reset();
 }
 
 // SAFETY: Methods simply delegate to `MiMalloc` allocator to ensure that the allocator
@@ -41,7 +70,7 @@ unsafe impl GlobalAlloc for TrackedAllocator {
     unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
         let ret = unsafe { MiMalloc.alloc(layout) };
         if !ret.is_null() {
-            NUM_ALLOC.fetch_add(1, SeqCst);
+            NUM_ALLOC.increment();
         }
         ret
     }
@@ -53,15 +82,15 @@ unsafe impl GlobalAlloc for TrackedAllocator {
     unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
         let ret = unsafe { MiMalloc.alloc_zeroed(layout) };
         if !ret.is_null() {
-            NUM_ALLOC.fetch_add(1, SeqCst);
+            NUM_ALLOC.increment();
         }
         ret
     }
 
     unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
         let ret = unsafe { MiMalloc.realloc(ptr, layout, new_size) };
         if !ret.is_null() {
-            NUM_REALLOC.fetch_add(1, SeqCst);
+            NUM_REALLOC.increment();
         }
         ret
     }
@@ -116,8 +145,8 @@ pub fn run() -> Result<(), io::Error> {
 
         Parser::new(&allocator, &file.source_text, file.source_type).with_options(options).parse();
 
-        let sys_allocs = NUM_ALLOC.load(SeqCst);
-        let sys_reallocs = NUM_REALLOC.load(SeqCst);
+        let sys_allocs = NUM_ALLOC.get();
+        let sys_reallocs = NUM_REALLOC.get();
         #[cfg(not(feature = "is_all_features"))]
         let (arena_allocs, arena_reallocs) = allocator.get_allocation_stats();
         #[cfg(feature = "is_all_features")]
```
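On the doc comment's point about ordering: for a standalone event counter that synchronizes no other memory, `Relaxed` ordering would suffice and is typically cheaper; the commit deliberately keeps `SeqCst` to play it safe. A hypothetical relaxed variant for comparison (an illustration only, not part of the commit):

```rust
use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};

// Hypothetical variant of `AtomicCounter::increment` using `Relaxed` ordering.
// A pure counter imposes no ordering requirements on surrounding memory
// accesses, so `Relaxed` still yields an exact count.
fn increment_relaxed(counter: &AtomicUsize) {
    let _ = counter.fetch_update(Relaxed, Relaxed, |n| Some(n.saturating_add(1)));
}

fn main() {
    let counter = AtomicUsize::new(0);
    increment_relaxed(&counter);
    assert_eq!(counter.load(Relaxed), 1);
}
```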
