Skip to content

Sweep abandoned blocks in eager sweeping #830

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 6 commits into from
Jun 21, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,8 @@ paste = "1.0.8"
rand = "0.8.5"

[build-dependencies]
built = { version = "0.6.0", features = ["git2"] }
# Pin to exactly 0.6.0: updating to 0.6.1 would raise our MSRV to 1.64.
built = { version = "=0.6.0", features = ["git2"] }

[features]
default = []
Expand Down
8 changes: 7 additions & 1 deletion src/policy/marksweepspace/native_ms/block.rs
Original file line number Diff line number Diff line change
Expand Up @@ -23,10 +23,16 @@ use std::num::NonZeroUsize;
/// size of `Option<Block>` is the same as `Block` itself.
// TODO: If we actually use the first block, we would need to turn the type into `Block(Address)`, and use `None` and
// `Block(Address::ZERO)` to differentiate those.
#[derive(Debug, Clone, Copy, PartialOrd, PartialEq)]
#[derive(Clone, Copy, PartialOrd, PartialEq)]
#[repr(transparent)]
pub struct Block(NonZeroUsize);

impl std::fmt::Debug for Block {
    /// Render the block as `Block(0x...)`, showing its start address in hex.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        // `{:#x}` emits the `0x` prefix for us.
        write!(f, "Block({:#x})", self.0.get())
    }
}

impl Region for Block {
const LOG_BYTES: usize = 16;

Expand Down
37 changes: 36 additions & 1 deletion src/policy/marksweepspace/native_ms/block_list.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@ use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;

/// List of blocks owned by the allocator
#[derive(Debug)]
#[repr(C)]
pub struct BlockList {
pub first: Option<Block>,
Expand All @@ -15,6 +14,12 @@ pub struct BlockList {
pub lock: AtomicBool,
}

impl std::fmt::Debug for BlockList {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "BlockList {:?}", self.iter().collect::<Vec<Block>>())
}
}

impl BlockList {
const fn new(size: usize) -> BlockList {
BlockList {
Expand Down Expand Up @@ -153,6 +158,36 @@ impl BlockList {
/// Release the list lock by clearing the `lock` flag, making the list
/// available to other threads again.
///
/// `SeqCst` matches the ordering used for the other accesses to `lock`
/// in this type; do not weaken it independently of the acquire side.
pub fn unlock(&mut self) {
    self.lock.store(false, Ordering::SeqCst);
}

/// Return an iterator over the blocks in this list, starting from the head.
pub fn iter(&self) -> BlockListIterator {
    let head = self.first;
    BlockListIterator { cursor: head }
}

/// Sweep every block in this list. A block that `attempt_release` does not
/// release is swept in place instead.
pub fn sweep_blocks<VM: VMBinding>(&self, space: &super::MarkSweepSpace<VM>) {
    self.iter()
        .filter(|block| !block.attempt_release(space))
        .for_each(|block| block.sweep::<VM>());
}
}

/// Iterator over the blocks of a `BlockList`, following the per-block
/// next-block links.
pub struct BlockListIterator {
    // The next block to yield; `None` once the list is exhausted.
    cursor: Option<Block>,
}

impl Iterator for BlockListIterator {
    type Item = Block;

    /// Yield the block under the cursor, then advance the cursor to that
    /// block's successor.
    fn next(&mut self) -> Option<Self::Item> {
        let head = self.cursor?;
        self.cursor = head.load_next_block();
        Some(head)
    }
}

/// Log2 of pointer size
Expand Down
29 changes: 27 additions & 2 deletions src/policy/marksweepspace/native_ms/global.rs
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,23 @@ impl AbandonedBlockLists {
i += 1;
}
}

/// Sweep the blocks in every bin of the available, consumed, and unswept
/// lists, then re-home each block popped off the unswept list: bins with
/// free cells go back to `available`, the rest to `consumed`.
fn sweep<VM: VMBinding>(&mut self, space: &MarkSweepSpace<VM>) {
    for bin in 0..MI_BIN_FULL {
        self.available[bin].sweep_blocks(space);
        self.consumed[bin].sweep_blocks(space);
        self.unswept[bin].sweep_blocks(space);

        // Everything is swept now, so drain the unswept list, routing each
        // block by whether it still has free cells.
        while let Some(block) = self.unswept[bin].pop() {
            let destination = if block.has_free_cells() {
                &mut self.available[bin]
            } else {
                &mut self.consumed[bin]
            };
            destination.push(block);
        }
    }
}
}

impl<VM: VMBinding> SFT for MarkSweepSpace<VM> {
Expand Down Expand Up @@ -267,8 +284,16 @@ impl<VM: VMBinding> MarkSweepSpace<VM> {
let work_packets = self.generate_sweep_tasks();
self.scheduler.work_buckets[WorkBucketStage::Release].bulk_add(work_packets);

let mut abandoned = self.abandoned.lock().unwrap();
abandoned.move_consumed_to_unswept();
if cfg!(feature = "eager_sweeping") {
// For eager sweeping, we have to sweep the lists that are abandoned to these global lists.
let mut abandoned = self.abandoned.lock().unwrap();
abandoned.sweep(self);
} else {
// For lazy sweeping, we just move blocks from consumed to unswept. When an allocator tries
// to use them, they will sweep the block.
let mut abandoned = self.abandoned.lock().unwrap();
abandoned.move_consumed_to_unswept();
}
}

/// Release a block.
Expand Down
41 changes: 9 additions & 32 deletions src/util/alloc/free_list_allocator.rs
Original file line number Diff line number Diff line change
Expand Up @@ -428,8 +428,7 @@ impl<VM: VMBinding> FreeListAllocator<VM> {
fn reset(&mut self) {
trace!("reset");
// consumed and available are now unswept
let mut bin = 0;
while bin < MAX_BIN + 1 {
for bin in 0..MI_BIN_FULL {
let unswept = self.unswept_blocks.get_mut(bin).unwrap();
unswept.lock();

Expand All @@ -444,7 +443,6 @@ impl<VM: VMBinding> FreeListAllocator<VM> {
sweep_later(&mut self.consumed_blocks[bin]);

unswept.unlock();
bin += 1;
}

if Self::ABANDON_BLOCKS_IN_RESET {
Expand All @@ -456,29 +454,13 @@ impl<VM: VMBinding> FreeListAllocator<VM> {
fn reset(&mut self) {
debug!("reset");
// sweep all blocks and push consumed onto available list
let mut bin = 0;
while bin < MAX_BIN + 1 {
let sweep = |first_block: Option<Block>, used_blocks: bool| {
let mut cursor = first_block;
while let Some(block) = cursor {
if used_blocks {
block.sweep::<VM>();
cursor = block.load_next_block();
} else {
let next = block.load_next_block();
if !block.attempt_release(self.space) {
block.sweep::<VM>();
}
cursor = next;
}
}
};

sweep(self.available_blocks[bin].first, true);
sweep(self.available_blocks_stress[bin].first, true);
for bin in 0..MI_BIN_FULL {
// Sweep available blocks
self.available_blocks[bin].sweep_blocks(self.space);
self.available_blocks_stress[bin].sweep_blocks(self.space);

// Sweep consumed blocks, and also push the blocks back to the available list.
sweep(self.consumed_blocks[bin].first, false);
self.consumed_blocks[bin].sweep_blocks(self.space);
if self.plan.base().is_precise_stress() && self.plan.base().is_stress_test_gc_enabled()
{
debug_assert!(self.plan.base().is_precise_stress());
Expand All @@ -487,11 +469,8 @@ impl<VM: VMBinding> FreeListAllocator<VM> {
self.available_blocks[bin].append(&mut self.consumed_blocks[bin]);
}

bin += 1;

if Self::ABANDON_BLOCKS_IN_RESET {
self.abandon_blocks();
}
// For eager sweeping, we should not have unswept blocks
assert!(self.unswept_blocks[bin].is_empty());
}

if Self::ABANDON_BLOCKS_IN_RESET {
Expand All @@ -501,8 +480,7 @@ impl<VM: VMBinding> FreeListAllocator<VM> {

fn abandon_blocks(&mut self) {
let mut abandoned = self.space.abandoned.lock().unwrap();
let mut i = 0;
while i < MI_BIN_FULL {
for i in 0..MI_BIN_FULL {
let available = self.available_blocks.get_mut(i).unwrap();
if !available.is_empty() {
abandoned.available[i].append(available);
Expand All @@ -522,7 +500,6 @@ impl<VM: VMBinding> FreeListAllocator<VM> {
if !unswept.is_empty() {
abandoned.unswept[i].append(unswept);
}
i += 1;
}
}
}