
Allow stress test to trigger at byte-level granularity #198

Merged: 7 commits, Jan 12, 2021
29 changes: 21 additions & 8 deletions src/plan/global.rs
@@ -272,17 +272,17 @@ pub trait Plan: Sized + 'static + Sync + Send {

#[inline]
fn stress_test_gc_required(&self) -> bool {
let pages = self.base().vm_map.get_cumulative_committed_pages();
trace!("stress_gc pages={}", pages);

let stress_factor = self.base().options.stress_factor;
if self.is_initialized()
&& (pages ^ self.base().last_stress_pages.load(Ordering::Relaxed)
> self.options().stress_factor)
&& (self.base().allocation_bytes.load(Ordering::SeqCst) > stress_factor)
{
self.base()
.last_stress_pages
.store(pages, Ordering::Relaxed);
trace!(
"Stress GC: allocation_bytes = {}, stress_factor = {}",
self.base().allocation_bytes.load(Ordering::Relaxed),
stress_factor
);
trace!("Doing stress GC");
self.base().allocation_bytes.store(0, Ordering::SeqCst);
true
} else {
false
@@ -354,6 +354,8 @@ pub struct BasePlan<VM: VMBinding> {
// A counter for per-mutator stack scanning
pub scanned_stacks: AtomicUsize,
pub mutator_iterator_lock: Mutex<()>,
// A counter that keeps track of the number of bytes allocated since the last stress test
pub allocation_bytes: AtomicUsize,
}

#[cfg(feature = "base_spaces")]
@@ -442,6 +444,7 @@ impl<VM: VMBinding> BasePlan<VM> {
inside_sanity: AtomicBool::new(false),
scanned_stacks: AtomicUsize::new(0),
mutator_iterator_lock: Mutex::new(()),
allocation_bytes: AtomicUsize::new(0),
}
}

@@ -636,6 +639,16 @@ impl<VM: VMBinding> BasePlan<VM> {
}

fn force_full_heap_collection(&self) {}

pub fn increase_allocation_bytes_by(&self, size: usize) {
let old_allocation_bytes = self.allocation_bytes.fetch_add(size, Ordering::SeqCst);
trace!(
"Stress GC: old_allocation_bytes = {}, size = {}, allocation_bytes = {}",
old_allocation_bytes,
size,
self.allocation_bytes.load(Ordering::Relaxed),
);
}
}

/**
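Taken together, the hunks above replace the old page-granularity check (`get_cumulative_committed_pages` vs. `last_stress_pages`) with a byte counter: every allocation adds its size through `increase_allocation_bytes_by`, and `stress_test_gc_required` fires once the counter exceeds `stress_factor`, then resets it. A minimal standalone sketch of that mechanism — `StressState` is a hypothetical stand-in, not the mmtk-core `BasePlan`:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

/// Hypothetical stand-in for the fields this PR adds to `BasePlan`.
struct StressState {
    /// Bytes allocated since the last stress GC.
    allocation_bytes: AtomicUsize,
    /// Trigger a stress GC once more than this many bytes have been allocated.
    stress_factor: usize,
}

impl StressState {
    /// Mirrors `BasePlan::increase_allocation_bytes_by`: called on every allocation.
    fn increase_allocation_bytes_by(&self, size: usize) {
        self.allocation_bytes.fetch_add(size, Ordering::SeqCst);
    }

    /// Mirrors the new `stress_test_gc_required` check: fire once the byte
    /// counter crosses the stress factor, then reset it for the next round.
    fn stress_test_gc_required(&self) -> bool {
        if self.allocation_bytes.load(Ordering::SeqCst) > self.stress_factor {
            self.allocation_bytes.store(0, Ordering::SeqCst);
            true
        } else {
            false
        }
    }
}

fn main() {
    let state = StressState {
        allocation_bytes: AtomicUsize::new(0),
        stress_factor: 4096,
    };
    state.increase_allocation_bytes_by(4000);
    assert!(!state.stress_test_gc_required()); // 4000 <= 4096: no stress GC yet
    state.increase_allocation_bytes_by(200);
    assert!(state.stress_test_gc_required()); // 4200 > 4096: stress GC triggers
    assert!(!state.stress_test_gc_required()); // counter was reset to 0
}
```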
20 changes: 18 additions & 2 deletions src/util/alloc/allocator.rs
@@ -1,4 +1,5 @@
use crate::util::address::Address;
use crate::util::constants::DEFAULT_STRESS_FACTOR;

use std::sync::atomic::Ordering;

@@ -120,9 +121,12 @@ pub trait Allocator<VM: VMBinding>: Downcast {
#[inline(always)]
fn alloc_slow_inline(&mut self, size: usize, align: usize, offset: isize) -> Address {
let tls = self.get_tls();
let plan = self.get_plan().base();
let stress_test = plan.options.stress_factor != DEFAULT_STRESS_FACTOR;

// Information about the previous collection.
let mut emergency_collection = false;
let mut previous_result_zero = false;
loop {
// Try to allocate using the slow path
let result = self.alloc_slow_once(size, align, offset);
@@ -132,7 +136,6 @@ pub trait Allocator<VM: VMBinding>: Downcast {
return result;
}

let plan = self.get_plan().base();
if !result.is_zero() {
// TODO: Check if we need oom lock.
// It seems the lock only protects access to the atomic boolean. We could possibly do
@@ -147,6 +150,18 @@ pub trait Allocator<VM: VMBinding>: Downcast {
plan.allocation_success.store(true, Ordering::Relaxed);
drop(guard);
}

// When a GC occurs, the resultant address provided by `acquire()` is 0x0.
// Hence, another iteration of this loop occurs. In such a case, the second
// iteration tries to allocate again, and if it is successful, then the allocation
// bytes are updated. However, this leads to double counting of the allocation:
// (i) by the original alloc_slow_inline(); and (ii) by the alloc_slow_inline()
// called by acquire(). In order to not double count the allocation, we only
// update allocation bytes if the previous result wasn't 0x0.
if stress_test && self.get_plan().is_initialized() && !previous_result_zero {
plan.increase_allocation_bytes_by(size);
}

return result;
}

@@ -155,7 +170,7 @@ pub trait Allocator<VM: VMBinding>: Downcast {
// leave this loop between the two GCs. The local var 'emergency_collection' was set to true
// after the first GC. But when we execute this check below, we just finished the second GC,
// which is not emergency. In such case, we will give a false OOM.
// We cannot just rely on the local var. Instead, we get the emergency collection value again,
// We cannot just rely on the local var. Instead, we get the emergency collection value again,
// and check both.
if emergency_collection && self.get_plan().is_emergency_collection() {
trace!("Emergency collection");
@@ -189,6 +204,7 @@ pub trait Allocator<VM: VMBinding>: Downcast {
*/
emergency_collection = self.get_plan().is_emergency_collection();
trace!("Got emergency collection as {}", emergency_collection);
previous_result_zero = true;
}
}

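The comment in the hunk above explains why the slow path only credits `allocation_bytes` when the previous attempt did not return 0x0: after a GC, the nested `alloc_slow_inline` reached through `acquire()` has already counted the bytes. A simplified, self-contained sketch of just that guard — `alloc_slow_model`, `try_alloc`, and `credit_bytes` are hypothetical stand-ins, not the real `Allocator` API:

```rust
// Simplified model of the retry loop in `alloc_slow_inline`, reduced to the
// double-counting guard. `try_alloc` returns 0 when a GC was triggered and a
// non-zero address on success; `credit_bytes` plays the role of
// `increase_allocation_bytes_by`.
fn alloc_slow_model(
    mut try_alloc: impl FnMut() -> usize,
    mut credit_bytes: impl FnMut(usize),
    size: usize,
    stress_test: bool,
) -> usize {
    let mut previous_result_zero = false;
    loop {
        let result = try_alloc();
        if result != 0 {
            // Only credit the bytes if the previous attempt did not fail;
            // otherwise the nested slow path (inside `acquire()`) already did.
            if stress_test && !previous_result_zero {
                credit_bytes(size);
            }
            return result;
        }
        // A GC occurred and this attempt returned a zero address; remember it.
        previous_result_zero = true;
    }
}

fn main() {
    // First attempt "fails" (address 0, as after a GC), the retry succeeds.
    let mut attempts = vec![0usize, 0x2000_0040].into_iter();
    let mut credited = 0usize;
    let addr = alloc_slow_model(|| attempts.next().unwrap(), |n| credited += n, 64, true);
    assert_eq!(addr, 0x2000_0040);
    // The outer loop credited nothing: the successful retry followed a zero
    // result, so in the real code the nested slow path had already counted it.
    assert_eq!(credited, 0);
}
```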
106 changes: 90 additions & 16 deletions src/util/alloc/bumpallocator.rs
@@ -1,13 +1,17 @@
use crate::util::constants::DEFAULT_STRESS_FACTOR;
use std::sync::atomic::Ordering;

use super::allocator::{align_allocation_no_fill, fill_alignment_gap};
use crate::util::Address;

use crate::util::alloc::Allocator;

use crate::plan::global::Plan;
use crate::plan::selected_plan::SelectedPlan;
use crate::policy::space::Space;
use crate::util::conversions::bytes_to_pages;
use crate::util::OpaquePointer;
use crate::vm::VMBinding;
use crate::vm::{ActivePlan, VMBinding};

const BYTES_IN_PAGE: usize = 1 << 12;
const BLOCK_SIZE: usize = 8 * BYTES_IN_PAGE;
@@ -72,22 +76,12 @@ impl<VM: VMBinding> Allocator<VM> for BumpAllocator<VM> {
fn alloc_slow_once(&mut self, size: usize, align: usize, offset: isize) -> Address {
trace!("alloc_slow");
// TODO: internalLimit etc.
let block_size = (size + BLOCK_MASK) & (!BLOCK_MASK);
let acquired_start: Address = self
.space
.unwrap()
.acquire(self.tls, bytes_to_pages(block_size));
if acquired_start.is_zero() {
trace!("Failed to acquire a new block");
acquired_start
let base = &self.plan.base();

if base.options.stress_factor == DEFAULT_STRESS_FACTOR {
self.acquire_block(size, align, offset, false)
} else {
trace!(
"Acquired a new block of size {} with start address {}",
block_size,
acquired_start
);
self.set_limit(acquired_start, acquired_start + block_size);
self.alloc(size, align, offset)
self.alloc_slow_once_stress_test(size, align, offset)
}
}

@@ -110,4 +104,84 @@ impl<VM: VMBinding> BumpAllocator<VM> {
plan,
}
}

// Slow path for allocation when the stress test flag has been enabled. It works
// by manipulating the limit so that it always stays below the cursor.
// It handles three kinds of allocation: (i) the hard (block) limit has been
// reached; (ii) an ordinary bump-pointer allocation, as in the fastpath; and
// (iii) the stress factor has been crossed.
fn alloc_slow_once_stress_test(&mut self, size: usize, align: usize, offset: isize) -> Address {
trace!("alloc_slow stress_test");
let result = align_allocation_no_fill::<VM>(self.cursor, align, offset);
let new_cursor = result + size;

// For the stress test, limit is kept in [0, block_size) to artificially make the
// check in the fastpath (alloc()) fail. The real limit is recovered by
// adding it to the current cursor.
if new_cursor > self.cursor + self.limit.as_usize() {
self.acquire_block(size, align, offset, true)
} else {
let base = &self.plan.base();
let is_mutator =
unsafe { VM::VMActivePlan::is_mutator(self.tls) } && self.plan.is_initialized();

if is_mutator
&& (base.allocation_bytes.load(Ordering::SeqCst) > base.options.stress_factor)
{
trace!(
"Stress GC: allocation_bytes = {} more than stress_factor = {}",
base.allocation_bytes.load(Ordering::Relaxed),
base.options.stress_factor
);
return self.acquire_block(size, align, offset, true);
}

fill_alignment_gap::<VM>(self.cursor, result);
self.limit -= new_cursor - self.cursor;
self.cursor = new_cursor;
trace!(
"alloc_slow: Bump allocation size: {}, result: {}, new_cursor: {}, limit: {}",
size,
result,
self.cursor,
self.limit
);
result
}
}

#[inline]
fn acquire_block(
&mut self,
size: usize,
align: usize,
offset: isize,
stress_test: bool,
) -> Address {
let block_size = (size + BLOCK_MASK) & (!BLOCK_MASK);
let acquired_start = self
.space
.unwrap()
.acquire(self.tls, bytes_to_pages(block_size));
if acquired_start.is_zero() {
trace!("Failed to acquire a new block");
acquired_start
} else {
trace!(
"Acquired a new block of size {} with start address {}",
block_size,
acquired_start
);
if !stress_test {
self.set_limit(acquired_start, acquired_start + block_size);
} else {
// For a stress test, we artificially make the fastpath fail by
// manipulating the limit as below.
// The assumption here is that we use an address range such that
// cursor > block_size always.
self.set_limit(acquired_start, unsafe { Address::from_usize(block_size) });
}
self.alloc(size, align, offset)
}
}
}
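In the stress-test path above, `limit` no longer holds the end address of the block but the number of bytes still available in it, so the fastpath comparison of the (large) cursor against the (small) limit always fails and every allocation is routed through the slow path, where the stress counter can be checked. A worked sketch with made-up numbers (not real allocator state) showing how the real block end is recovered as `cursor + limit` and how that invariant is preserved across a bump allocation:

```rust
// Hypothetical numbers illustrating the stress-test encoding of `limit`.
fn main() {
    let block_size: usize = 8 * 4096;     // BLOCK_SIZE: eight 4 KiB pages
    let mut cursor: usize = 0x2000_0000;  // start of a freshly acquired block
    let mut limit: usize = block_size;    // under stress test: bytes remaining,
                                          // not an end address

    // A fastpath-style check `new_cursor > limit` always fails here, because
    // any real address is far larger than a block size, forcing the slow path.
    let size = 64;
    let new_cursor = cursor + size;
    assert!(new_cursor > limit);

    // The slow path recovers the real end of the block as `cursor + limit`.
    let real_limit = cursor + limit;
    assert!(new_cursor <= real_limit); // still room in the block

    // Bump-allocate, mirroring `limit -= new_cursor - cursor; cursor = new_cursor;`.
    limit -= new_cursor - cursor;
    cursor = new_cursor;

    // Invariant: `cursor + limit` still points at the real end of the block.
    assert_eq!(cursor + limit, real_limit);
}
```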
7 changes: 7 additions & 0 deletions src/util/constants.rs
@@ -100,3 +100,10 @@ pub const BITS_IN_PAGE: usize = 1 << LOG_BITS_IN_PAGE;

/* Assume byte-addressability */
pub const LOG_BYTES_IN_ADDRESS_SPACE: u8 = BITS_IN_ADDRESS as u8;

/****************************************************************************
*
* Default options
*/

pub const DEFAULT_STRESS_FACTOR: usize = usize::max_value();
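The new default acts as a sentinel meaning "stress test disabled": a byte threshold that can never be exceeded, replacing the old page-granularity default of `usize::max_value() >> LOG_BYTES_IN_PAGE`. The allocators above simply compare the option against this constant to decide whether to take the stress-test path; a tiny illustration of that check, using a hypothetical options struct rather than the real `util::options::Options`:

```rust
const DEFAULT_STRESS_FACTOR: usize = usize::max_value();

// Hypothetical options holder, standing in for `util::options::Options`.
struct Opts {
    stress_factor: usize,
}

// Mirrors the check used in `alloc_slow_inline` and `alloc_slow_once`: any
// value other than the sentinel enables the byte-granularity stress test.
fn stress_test_enabled(opts: &Opts) -> bool {
    opts.stress_factor != DEFAULT_STRESS_FACTOR
}

fn main() {
    assert!(!stress_test_enabled(&Opts { stress_factor: DEFAULT_STRESS_FACTOR }));
    assert!(stress_test_enabled(&Opts { stress_factor: 4096 }));
}
```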
2 changes: 0 additions & 2 deletions src/util/heap/layout/map.rs
@@ -56,6 +56,4 @@ pub trait Map: Sized {
}

fn add_to_cumulative_committed_pages(&self, pages: usize);

fn get_cumulative_committed_pages(&self) -> usize;
}
4 changes: 0 additions & 4 deletions src/util/heap/layout/map32.rs
@@ -230,10 +230,6 @@ impl Map for Map32 {
self.cumulative_committed_pages
.fetch_add(pages, Ordering::Relaxed);
}

fn get_cumulative_committed_pages(&self) -> usize {
self.cumulative_committed_pages.load(Ordering::Relaxed)
}
}

impl Map32 {
4 changes: 0 additions & 4 deletions src/util/heap/layout/map64.rs
@@ -208,10 +208,6 @@ impl Map for Map64 {
self.cumulative_committed_pages
.fetch_add(pages, Ordering::Relaxed);
}

fn get_cumulative_committed_pages(&self) -> usize {
self.cumulative_committed_pages.load(Ordering::Relaxed)
}
}

impl Map64 {
8 changes: 3 additions & 5 deletions src/util/options.rs
@@ -1,4 +1,4 @@
use crate::util::constants::LOG_BYTES_IN_PAGE;
use crate::util::constants::DEFAULT_STRESS_FACTOR;
use std::cell::UnsafeCell;
use std::default::Default;
use std::ops::Deref;
@@ -97,7 +97,7 @@ options! {
// Note: This gets ignored. Use RUST_LOG to specify log level.
// TODO: Delete this option.
verbose: usize [always_valid] = 0,
stress_factor: usize [always_valid] = usize::max_value() >> LOG_BYTES_IN_PAGE,
stress_factor: usize [always_valid] = DEFAULT_STRESS_FACTOR,
// vmspace
// FIXME: These options are set for JikesRVM. We need a proper way to set options.
// We need to set these values programmatically in VM specific code.
@@ -136,12 +136,10 @@ impl Options {

#[cfg(test)]
mod tests {
use crate::util::constants::LOG_BYTES_IN_PAGE;
use crate::util::constants::DEFAULT_STRESS_FACTOR;
use crate::util::options::Options;
use crate::util::test_util::serial_test;

const DEFAULT_STRESS_FACTOR: usize = usize::max_value() >> LOG_BYTES_IN_PAGE;

#[test]
fn no_env_var() {
serial_test(|| {