Skip to content

Commit a424ef2

Browse files
authored
Malloc API (#608)
This PR adds a set of malloc related API, and another set of malloc API that will count allocation size into MMTk heap under the feature `malloc_counted_size`. This PR adds things described in #572 (comment). The changes in this PR: * tidy up the malloc library import * provide a set of standard malloc functions (`malloc`, `calloc`, `realloc` and `free`) * provide a set of malloc functions that will count the malloc size into MMTk heap under the feature `malloc_counted_size` * provide `gc_poll()` that can be used to check GC when a binding uses counted malloc functions. Small refactoring to allow `poll()` without a space reference.
1 parent 7373168 commit a424ef2

File tree

27 files changed

+555
-115
lines changed

27 files changed

+555
-115
lines changed

.github/scripts/ci-test.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@ cd vmbindings/dummyvm
1818
for fn in $(ls src/tests/*.rs); do
1919
t=$(basename -s .rs $fn)
2020

21-
if [[ $t == "mod.rs" ]]; then
21+
if [[ $t == "mod" ]]; then
2222
continue
2323
fi
2424

Cargo.toml

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@ mmtk-macros = { version = "0.12.0", path = "macros/" }
2424
libc = "0.2"
2525
jemalloc-sys = { version = "0.3.2", features = ["disable_initial_exec_tls"], optional = true }
2626
mimalloc-sys = { version = "0.1.6", optional = true }
27-
hoard-sys = { version = "0.1.1", optional = true }
27+
hoard-sys = { version = "0.1.2", optional = true }
2828
lazy_static = "1.1"
2929
log = { version = "0.4", features = ["max_level_trace", "release_max_level_off"] }
3030
crossbeam = "0.8.1"
@@ -89,6 +89,9 @@ nogc_multi_space = []
8989
# To collect statistics for each GC work packet. Enabling this may introduce a small overhead (several percentage slowdown on benchmark time).
9090
work_packet_stats = []
9191

92+
# Count the malloc'd memory into the heap size
93+
malloc_counted_size = []
94+
9295
# Do not modify the following line - ci-common.sh matches it
9396
# -- Mutally exclusive features --
9497
# Only one feature from each group can be provided. Otherwise build will fail.

docs/tutorial/code/mygc_semispace/global.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -102,8 +102,8 @@ impl<VM: VMBinding> Plan for MyGC<VM> {
102102
// ANCHOR_END: schedule_collection
103103

104104
// ANCHOR: collection_required()
105-
fn collection_required(&self, space_full: bool, space: &dyn Space<Self::VM>) -> bool {
106-
self.base().collection_required(self, space_full, space)
105+
fn collection_required(&self, space_full: bool, _space: Option<&dyn Space<Self::VM>>) -> bool {
106+
self.base().collection_required(self, space_full)
107107
}
108108
// ANCHOR_END: collection_required()
109109

src/memory_manager.rs

Lines changed: 78 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -153,6 +153,84 @@ pub fn get_allocator_mapping<VM: VMBinding>(
153153
mmtk.plan.get_allocator_mapping()[semantics]
154154
}
155155

156+
/// The standard malloc. MMTk either uses its own allocator, or forwards the call to a
157+
/// library malloc.
158+
pub fn malloc(size: usize) -> Address {
159+
crate::util::malloc::malloc(size)
160+
}
161+
162+
/// The standard malloc except that with the feature `malloc_counted_size`, MMTk will count the allocated memory into its heap size.
163+
/// Thus the method requires a reference to an MMTk instance. MMTk either uses its own allocator, or forwards the call to a
164+
/// library malloc.
165+
#[cfg(feature = "malloc_counted_size")]
166+
pub fn counted_malloc<VM: VMBinding>(mmtk: &MMTK<VM>, size: usize) -> Address {
167+
crate::util::malloc::counted_malloc(mmtk, size)
168+
}
169+
170+
/// The standard calloc.
171+
pub fn calloc(num: usize, size: usize) -> Address {
172+
crate::util::malloc::calloc(num, size)
173+
}
174+
175+
/// The standard calloc except that with the feature `malloc_counted_size`, MMTk will count the allocated memory into its heap size.
176+
/// Thus the method requires a reference to an MMTk instance.
177+
#[cfg(feature = "malloc_counted_size")]
178+
pub fn counted_calloc<VM: VMBinding>(mmtk: &MMTK<VM>, num: usize, size: usize) -> Address {
179+
crate::util::malloc::counted_calloc(mmtk, num, size)
180+
}
181+
182+
/// The standard realloc.
183+
pub fn realloc(addr: Address, size: usize) -> Address {
184+
crate::util::malloc::realloc(addr, size)
185+
}
186+
187+
/// The standard realloc except that with the feature `malloc_counted_size`, MMTk will count the allocated memory into its heap size.
188+
/// Thus the method requires a reference to an MMTk instance, and the size of the existing memory that will be reallocated.
189+
/// The `addr` in the arguments must be an address that was previously returned from MMTk's `malloc()`, `calloc()` or `realloc()`.
190+
#[cfg(feature = "malloc_counted_size")]
191+
pub fn realloc_with_old_size<VM: VMBinding>(
192+
mmtk: &MMTK<VM>,
193+
addr: Address,
194+
size: usize,
195+
old_size: usize,
196+
) -> Address {
197+
crate::util::malloc::realloc_with_old_size(mmtk, addr, size, old_size)
198+
}
199+
200+
/// The standard free.
201+
/// The `addr` in the arguments must be an address that was previously returned from MMTk's `malloc()`, `calloc()` or `realloc()`.
202+
pub fn free(addr: Address) {
203+
crate::util::malloc::free(addr)
204+
}
205+
206+
/// The standard free except that with the feature `malloc_counted_size`, MMTk will count the allocated memory into its heap size.
207+
/// Thus the method requires a reference to an MMTk instance, and the size of the memory to free.
208+
/// The `addr` in the arguments must be an address that was previously returned from MMTk's `malloc()`, `calloc()` or `realloc()`.
209+
#[cfg(feature = "malloc_counted_size")]
210+
pub fn free_with_size<VM: VMBinding>(mmtk: &MMTK<VM>, addr: Address, old_size: usize) {
211+
crate::util::malloc::free_with_size(mmtk, addr, old_size)
212+
}
213+
214+
/// Poll for GC. MMTk will decide if a GC is needed. If so, this call will block
215+
/// the current thread, and trigger a GC. Otherwise, it will simply return.
216+
/// Usually a binding does not need to call this function. MMTk will poll for GC during its allocation.
217+
/// However, if a binding uses counted malloc (which won't poll for GC), it may want to poll for GC manually.
218+
/// This function should only be used by mutator threads.
219+
pub fn gc_poll<VM: VMBinding>(mmtk: &MMTK<VM>, tls: VMMutatorThread) {
220+
use crate::vm::{ActivePlan, Collection};
221+
debug_assert!(
222+
VM::VMActivePlan::is_mutator(tls.0),
223+
"gc_poll() can only be called by a mutator thread."
224+
);
225+
226+
let plan = mmtk.get_plan();
227+
if plan.should_trigger_gc_when_heap_is_full() && plan.poll(false, None) {
228+
debug!("Collection required");
229+
assert!(plan.is_initialized(), "GC is not allowed here: collection is not initialized (did you call initialize_collection()?).");
230+
VM::VMCollection::block_for_gc(tls);
231+
}
232+
}
233+
156234
/// Run the main loop for the GC controller thread. This method does not return.
157235
///
158236
/// Arguments:

src/plan/generational/copying/global.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -64,7 +64,7 @@ impl<VM: VMBinding> Plan for GenCopy<VM> {
6464
}
6565
}
6666

67-
fn collection_required(&self, space_full: bool, space: &dyn Space<Self::VM>) -> bool
67+
fn collection_required(&self, space_full: bool, space: Option<&dyn Space<Self::VM>>) -> bool
6868
where
6969
Self: Sized,
7070
{

src/plan/generational/global.rs

Lines changed: 10 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -106,21 +106,26 @@ impl<VM: VMBinding> Gen<VM> {
106106
&self,
107107
plan: &P,
108108
space_full: bool,
109-
space: &dyn Space<VM>,
109+
space: Option<&dyn Space<VM>>,
110110
) -> bool {
111111
let nursery_full = self.nursery.reserved_pages()
112112
>= (conversions::bytes_to_pages_up(*self.common.base.options.max_nursery));
113113
if nursery_full {
114114
return true;
115115
}
116116

117-
if space_full && space.common().descriptor != self.nursery.common().descriptor {
117+
// Is the GC triggered by nursery?
118+
// - if space is `None`, it is not. Return false immediately.
119+
// - if space is `Some`, we further check its descriptor.
120+
let is_triggered_by_nursery = space.map_or(false, |s| {
121+
s.common().descriptor == self.nursery.common().descriptor
122+
});
123+
// If space is full and the GC is not triggered by nursery, next GC will be full heap GC.
124+
if space_full && !is_triggered_by_nursery {
118125
self.next_gc_full_heap.store(true, Ordering::SeqCst);
119126
}
120127

121-
self.common
122-
.base
123-
.collection_required(plan, space_full, space)
128+
self.common.base.collection_required(plan, space_full)
124129
}
125130

126131
pub fn force_full_heap_collection(&self) {

src/plan/generational/immix/global.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -96,7 +96,7 @@ impl<VM: VMBinding> Plan for GenImmix<VM> {
9696
self.gen.last_collection_full_heap()
9797
}
9898

99-
fn collection_required(&self, space_full: bool, space: &dyn Space<Self::VM>) -> bool
99+
fn collection_required(&self, space_full: bool, space: Option<&dyn Space<Self::VM>>) -> bool
100100
where
101101
Self: Sized,
102102
{

src/plan/global.rs

Lines changed: 45 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -203,7 +203,14 @@ pub trait Plan: 'static + Sync + Downcast {
203203
/// This is invoked once per GC by one worker thread. 'tls' is the worker thread that executes this method.
204204
fn release(&mut self, tls: VMWorkerThread);
205205

206-
fn poll(&self, space_full: bool, space: &dyn Space<Self::VM>) -> bool {
206+
/// This method is called periodically by the allocation subsystem
207+
/// (by default, each time a page is consumed), and provides the
208+
/// collector with an opportunity to collect.
209+
///
210+
/// Arguments:
211+
/// * `space_full`: Space request failed, must recover pages within 'space'.
212+
/// * `space`: The space that triggered the poll. This could be `None` if the poll is not triggered by a space.
213+
fn poll(&self, space_full: bool, space: Option<&dyn Space<Self::VM>>) -> bool {
207214
if self.collection_required(space_full, space) {
208215
// FIXME
209216
/*if space == META_DATA_SPACE {
@@ -236,8 +243,12 @@ pub trait Plan: 'static + Sync + Downcast {
236243
false
237244
}
238245

239-
fn log_poll(&self, space: &dyn Space<Self::VM>, message: &'static str) {
240-
info!(" [POLL] {}: {}", space.get_name(), message);
246+
fn log_poll(&self, space: Option<&dyn Space<Self::VM>>, message: &'static str) {
247+
if let Some(space) = space {
248+
info!(" [POLL] {}: {}", space.get_name(), message);
249+
} else {
250+
info!(" [POLL] {}", message);
251+
}
241252
}
242253

243254
/**
@@ -248,7 +259,7 @@ pub trait Plan: 'static + Sync + Downcast {
248259
* @param space TODO
249260
* @return <code>true</code> if a collection is requested by the plan.
250261
*/
251-
fn collection_required(&self, space_full: bool, _space: &dyn Space<Self::VM>) -> bool;
262+
fn collection_required(&self, space_full: bool, _space: Option<&dyn Space<Self::VM>>) -> bool;
252263

253264
// Note: The following methods are about page accounting. The default implementation should
254265
// work fine for non-copying plans. For copying plans, the plan should override any of these methods
@@ -372,9 +383,12 @@ pub struct BasePlan<VM: VMBinding> {
372383
/// Have we scanned all the stacks?
373384
stacks_prepared: AtomicBool,
374385
pub mutator_iterator_lock: Mutex<()>,
375-
// A counter that keeps tracks of the number of bytes allocated since last stress test
376-
pub allocation_bytes: AtomicUsize,
377-
// Wrapper around analysis counters
386+
/// A counter that keeps track of the number of bytes allocated since the last stress test
387+
allocation_bytes: AtomicUsize,
388+
/// A counter that keeps track of the number of bytes allocated by malloc
389+
#[cfg(feature = "malloc_counted_size")]
390+
malloc_bytes: AtomicUsize,
391+
/// Wrapper around analysis counters
378392
#[cfg(feature = "analysis")]
379393
pub analysis_manager: AnalysisManager<VM>,
380394

@@ -518,6 +532,8 @@ impl<VM: VMBinding> BasePlan<VM> {
518532
scanned_stacks: AtomicUsize::new(0),
519533
mutator_iterator_lock: Mutex::new(()),
520534
allocation_bytes: AtomicUsize::new(0),
535+
#[cfg(feature = "malloc_counted_size")]
536+
malloc_bytes: AtomicUsize::new(0),
521537
#[cfg(feature = "analysis")]
522538
analysis_manager,
523539
}
@@ -596,6 +612,14 @@ impl<VM: VMBinding> BasePlan<VM> {
596612
pages += self.ro_space.reserved_pages();
597613
}
598614

615+
// If we need to count malloc'd size as part of our heap, we add it here.
616+
#[cfg(feature = "malloc_counted_size")]
617+
{
618+
pages += crate::util::conversions::bytes_to_pages_up(
619+
self.malloc_bytes.load(Ordering::SeqCst),
620+
);
621+
}
622+
599623
// The VM space may be used as an immutable boot image, in which case, we should not count
600624
// it as part of the heap size.
601625
pages
@@ -794,12 +818,7 @@ impl<VM: VMBinding> BasePlan<VM> {
794818
&& (self.allocation_bytes.load(Ordering::SeqCst) > *self.options.stress_factor)
795819
}
796820

797-
pub(super) fn collection_required<P: Plan>(
798-
&self,
799-
plan: &P,
800-
space_full: bool,
801-
_space: &dyn Space<VM>,
802-
) -> bool {
821+
pub(super) fn collection_required<P: Plan>(&self, plan: &P, space_full: bool) -> bool {
803822
let stress_force_gc = self.should_do_stress_gc();
804823
if stress_force_gc {
805824
debug!(
@@ -838,6 +857,19 @@ impl<VM: VMBinding> BasePlan<VM> {
838857
self.vm_space
839858
.verify_side_metadata_sanity(side_metadata_sanity_checker);
840859
}
860+
861+
#[cfg(feature = "malloc_counted_size")]
862+
pub(crate) fn increase_malloc_bytes_by(&self, size: usize) {
863+
self.malloc_bytes.fetch_add(size, Ordering::SeqCst);
864+
}
865+
#[cfg(feature = "malloc_counted_size")]
866+
pub(crate) fn decrease_malloc_bytes_by(&self, size: usize) {
867+
self.malloc_bytes.fetch_sub(size, Ordering::SeqCst);
868+
}
869+
#[cfg(feature = "malloc_counted_size")]
870+
pub fn get_malloc_bytes(&self) -> usize {
871+
self.malloc_bytes.load(Ordering::SeqCst)
872+
}
841873
}
842874

843875
/**

src/plan/immix/global.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -51,8 +51,8 @@ pub const IMMIX_CONSTRAINTS: PlanConstraints = PlanConstraints {
5151
impl<VM: VMBinding> Plan for Immix<VM> {
5252
type VM = VM;
5353

54-
fn collection_required(&self, space_full: bool, space: &dyn Space<Self::VM>) -> bool {
55-
self.base().collection_required(self, space_full, space)
54+
fn collection_required(&self, space_full: bool, _space: Option<&dyn Space<Self::VM>>) -> bool {
55+
self.base().collection_required(self, space_full)
5656
}
5757

5858
fn last_collection_was_exhaustive(&self) -> bool {

src/plan/markcompact/global.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -163,8 +163,8 @@ impl<VM: VMBinding> Plan for MarkCompact<VM> {
163163
.add(crate::util::sanity::sanity_checker::ScheduleSanityGC::<Self>::new(self));
164164
}
165165

166-
fn collection_required(&self, space_full: bool, space: &dyn Space<Self::VM>) -> bool {
167-
self.base().collection_required(self, space_full, space)
166+
fn collection_required(&self, space_full: bool, _space: Option<&dyn Space<Self::VM>>) -> bool {
167+
self.base().collection_required(self, space_full)
168168
}
169169

170170
fn get_used_pages(&self) -> usize {

0 commit comments

Comments
 (0)