diff --git a/Cargo.toml b/Cargo.toml index 2ab6dca..0087e59 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,6 +3,8 @@ members = [ "crates/component", "crates/mysten-network", "crates/name-variant", + "crates/parity-util-mem", + "crates/parity-util-mem-derive", "crates/rccheck", "crates/telemetry-subscribers", "crates/typed-store", diff --git a/crates/parity-util-mem-derive/Cargo.toml b/crates/parity-util-mem-derive/Cargo.toml new file mode 100644 index 0000000..7622be0 --- /dev/null +++ b/crates/parity-util-mem-derive/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "parity-util-mem-derive" +version = "0.1.0" +authors = ["Parity Technologies "] +license = "MIT OR Apache-2.0" +description = "Crate for memory reporting" +repository = "https://github.com/paritytech/pariry-common/parity-util-mem/derive" +edition = "2021" +rust-version = "1.56.1" +publish = false + +[lib] +path = "lib.rs" +proc-macro = true + +[dependencies] +proc-macro2 = "1" +syn = { version = "1", features = ["full"] } +synstructure = "0.12" diff --git a/crates/parity-util-mem-derive/lib.rs b/crates/parity-util-mem-derive/lib.rs new file mode 100644 index 0000000..e1eb65c --- /dev/null +++ b/crates/parity-util-mem-derive/lib.rs @@ -0,0 +1,90 @@ +// Copyright (c) 2022, Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A crate for deriving the MallocSizeOf trait. +//! +//! This is a copy of Servo malloc_size_of_derive code, modified to work with +//! our `parity_util_mem` library + +#![allow(clippy::all)] + +extern crate proc_macro2; +#[macro_use] +extern crate syn; +#[macro_use] +extern crate synstructure; + +decl_derive!([MallocSizeOf, attributes(ignore_malloc_size_of)] => malloc_size_of_derive); + +fn malloc_size_of_derive(s: synstructure::Structure) -> proc_macro2::TokenStream { + let match_body = s.each(|binding| { + let ignore = binding + .ast() + .attrs + .iter() + .any(|attr| match attr.parse_meta().unwrap() { + syn::Meta::Path(ref path) | syn::Meta::List(syn::MetaList { ref path, .. }) + if path.is_ident("ignore_malloc_size_of") => + { + panic!( + "#[ignore_malloc_size_of] should have an explanation, \ + e.g. #[ignore_malloc_size_of = \"because reasons\"]" + ); + } + syn::Meta::NameValue(syn::MetaNameValue { ref path, .. }) + if path.is_ident("ignore_malloc_size_of") => + { + true + } + _ => false, + }); + if ignore { + None + } else if let syn::Type::Array(..) = binding.ast().ty { + Some(quote! { + for item in #binding.iter() { + sum += parity_util_mem::MallocSizeOf::size_of(item, ops); + } + }) + } else { + Some(quote! { + sum += parity_util_mem::MallocSizeOf::size_of(#binding, ops); + }) + } + }); + + let ast = s.ast(); + let name = &ast.ident; + let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl(); + let mut where_clause = where_clause.unwrap_or(&parse_quote!(where)).clone(); + for param in ast.generics.type_params() { + let ident = ¶m.ident; + where_clause + .predicates + .push(parse_quote!(#ident: parity_util_mem::MallocSizeOf)); + } + + let tokens = quote! 
{ + impl #impl_generics parity_util_mem::MallocSizeOf for #name #ty_generics #where_clause { + #[inline] + #[allow(unused_variables, unused_mut, unreachable_code)] + fn size_of(&self, ops: &mut parity_util_mem::MallocSizeOfOps) -> usize { + let mut sum = 0; + match *self { + #match_body + } + sum + } + } + }; + + tokens +} diff --git a/crates/parity-util-mem/Cargo.toml b/crates/parity-util-mem/Cargo.toml new file mode 100644 index 0000000..dc0cc07 --- /dev/null +++ b/crates/parity-util-mem/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "parity-util-mem" +version = "0.11.0" +authors = ["Parity Technologies ", "Andrew Schran "] +repository = "https://github.com/paritytech/parity-common" +description = "Collection of memory related utilities" +license = "MIT OR Apache-2.0" +edition = "2021" +rust-version = "1.56.1" +publish = false + +[dependencies] +cfg-if = "1.0.0" +lru = { version = "0.7", optional = true } +hashbrown = { version = "0.12", optional = true } +parity-util-mem-derive = { path = "../parity-util-mem-derive", version = "0.1" } +impl-trait-for-tuples = "0.2.0" +fastcrypto = { git = "https://github.com/MystenLabs/fastcrypto", rev = "c022a2ae23ca7cc2778293fd3b1db42e8cd02d3b", features = ["copy_key"] } +indexmap = { version = "1.9.1", features = ["serde"] } + +smallvec = { version = "1.0.0", optional = true } +parking_lot = { version = "0.12.0", optional = true } + +[target.'cfg(target_os = "windows")'.dependencies] +winapi = { version = "0.3.8", features = ["heapapi"] } + +[features] +default = ["std", "lru", "hashbrown", "smallvec", "estimate-heapsize"] +std = ["parking_lot"] +# Full estimate: no call to allocator +estimate-heapsize = [] diff --git a/crates/parity-util-mem/src/allocators.rs b/crates/parity-util-mem/src/allocators.rs new file mode 100644 index 0000000..e10019f --- /dev/null +++ b/crates/parity-util-mem/src/allocators.rs @@ -0,0 +1,96 @@ +// Copyright (c) 2022, Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#[cfg(feature = "std")] +use crate::malloc_size::MallocUnconditionalSizeOf; +use crate::malloc_size::{MallocSizeOf, MallocSizeOfOps, VoidPtrToSizeFn}; +#[cfg(not(feature = "std"))] +use core::ffi::c_void; +#[cfg(feature = "std")] +use std::os::raw::c_void; + +mod usable_size { + + use super::*; + + cfg_if::cfg_if! { + + if #[cfg(any( + target_arch = "wasm32", + feature = "estimate-heapsize", + ))] { + + // do not try system allocator + + /// Warning this is for compatibility only. + /// This function does panic: `estimate-heapsize` feature needs to be activated + /// to avoid this function call. + pub unsafe extern "C" fn malloc_usable_size(_ptr: *const c_void) -> usize { + unreachable!("estimate heapsize only") + } + + } else if #[cfg(any( + target_os = "linux", + target_os = "android", + target_os = "freebsd", + ))] { + // Linux/BSD call system allocator (currently malloc). + extern "C" { + pub fn malloc_usable_size(ptr: *const c_void) -> usize; + } + + } else { + // default allocator for non linux or windows system use estimate + pub unsafe extern "C" fn malloc_usable_size(_ptr: *const c_void) -> usize { + unreachable!("estimate heapsize or feature allocator needed") + } + + } + + } + + /// No enclosing function defined. 
+ #[inline] + pub fn new_enclosing_size_fn() -> Option { + None + } +} + +/// Get a new instance of a MallocSizeOfOps +pub fn new_malloc_size_ops() -> MallocSizeOfOps { + MallocSizeOfOps::new( + usable_size::malloc_usable_size, + usable_size::new_enclosing_size_fn(), + None, + ) +} + +/// Extension methods for `MallocSizeOf` trait, do not implement +/// directly. +/// It allows getting heapsize without exposing `MallocSizeOfOps` +/// (a single default `MallocSizeOfOps` is used for each call). +pub trait MallocSizeOfExt: MallocSizeOf { + /// Method to launch a heapsize measurement with a + /// fresh state. + fn malloc_size_of(&self) -> usize { + let mut ops = new_malloc_size_ops(); + ::size_of(self, &mut ops) + } +} + +impl MallocSizeOfExt for T {} + +#[cfg(feature = "std")] +impl MallocSizeOf for std::sync::Arc { + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + self.unconditional_size_of(ops) + } +} diff --git a/crates/parity-util-mem/src/external_impls.rs b/crates/parity-util-mem/src/external_impls.rs new file mode 100644 index 0000000..bc6c5d7 --- /dev/null +++ b/crates/parity-util-mem/src/external_impls.rs @@ -0,0 +1,36 @@ +// Copyright (c) 2022, Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::{MallocShallowSizeOf, MallocSizeOf}; + +// fastcrypto +malloc_size_of_is_0!(fastcrypto::ed25519::Ed25519PublicKey); +malloc_size_of_is_0!(fastcrypto::ed25519::Ed25519Signature); + +// hash_map +malloc_size_of_is_0!(std::collections::hash_map::RandomState); + +// indexmap +impl MallocShallowSizeOf for indexmap::IndexMap { + fn shallow_size_of(&self, _ops: &mut crate::MallocSizeOfOps) -> usize { + self.capacity() + * (std::mem::size_of::() + + std::mem::size_of::() + + (2 * std::mem::size_of::())) + } +} +impl MallocSizeOf for indexmap::IndexMap { + // This only produces a rough estimate of IndexMap size, because we cannot access private + // fields to measure them precisely. + fn size_of(&self, ops: &mut crate::MallocSizeOfOps) -> usize { + let mut n = self.shallow_size_of(ops); + if let (Some(k), Some(v)) = (K::constant_size(), V::constant_size()) { + n += self.len() * (k + v) + } else { + n += self + .iter() + .fold(n, |acc, (k, v)| acc + k.size_of(ops) + v.size_of(ops)) + } + n + } +} diff --git a/crates/parity-util-mem/src/lib.rs b/crates/parity-util-mem/src/lib.rs new file mode 100644 index 0000000..71c54a2 --- /dev/null +++ b/crates/parity-util-mem/src/lib.rs @@ -0,0 +1,118 @@ +// Copyright (c) 2022, Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Crate for parity memory management related utilities. +//! It includes global allocator choice, heap measurement and +//! memory erasure. + +#![allow(clippy::all)] +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(not(feature = "std"))] +extern crate alloc; + +// default allocator used +mod memory_stats_noop; +use memory_stats_noop as memory_stats; + +pub mod allocators; + +#[cfg(any( + any(target_os = "macos", target_os = "ios"), + feature = "estimate-heapsize" +))] +pub mod sizeof; + +/// This is a copy of patched crate `malloc_size_of` as a module. 
+/// We need to have it as an inner module to be able to define our own traits implementation, +/// if at some point the trait become standard enough we could use the right way of doing it +/// by implementing it in our type traits crates. At this time moving this trait to the primitive +/// types level would impact too much of the dependencies to be easily manageable. +#[macro_use] +mod malloc_size; + +pub mod external_impls; + +pub use allocators::MallocSizeOfExt; +pub use malloc_size::{MallocShallowSizeOf, MallocSizeOf, MallocSizeOfOps}; + +pub use parity_util_mem_derive::*; + +/// Heap size of structure. +/// +/// Structure can be anything that implements MallocSizeOf. +pub fn malloc_size(t: &T) -> usize { + MallocSizeOf::size_of(t, &mut allocators::new_malloc_size_ops()) +} + +/// An error related to the memory stats gathering. +#[derive(Clone, Debug)] +pub struct MemoryStatsError(memory_stats::Error); + +#[cfg(feature = "std")] +impl std::fmt::Display for MemoryStatsError { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + self.0.fmt(fmt) + } +} + +#[cfg(feature = "std")] +impl std::error::Error for MemoryStatsError {} + +/// Snapshot of collected memory metrics. +#[non_exhaustive] +#[derive(Debug, Clone)] +pub struct MemoryAllocationSnapshot { + /// Total resident memory, in bytes. + pub resident: u64, + /// Total allocated memory, in bytes. + pub allocated: u64, +} + +/// Accessor to the allocator internals. +#[derive(Clone)] +pub struct MemoryAllocationTracker(self::memory_stats::MemoryAllocationTracker); + +impl MemoryAllocationTracker { + /// Create an instance of an allocation tracker. + pub fn new() -> Result { + self::memory_stats::MemoryAllocationTracker::new() + .map(MemoryAllocationTracker) + .map_err(MemoryStatsError) + } + + /// Create an allocation snapshot. + pub fn snapshot(&self) -> Result { + self.0.snapshot().map_err(MemoryStatsError) + } +} + +#[cfg(feature = "std")] +#[cfg(test)] +mod test { + use super::{malloc_size, MallocSizeOf, MallocSizeOfExt}; + use std::sync::Arc; + + #[test] + fn test_arc() { + let val = Arc::new("test".to_string()); + let s = val.malloc_size_of(); + assert!(s > 0); + } + + #[test] + fn test_dyn() { + trait Augmented: MallocSizeOf {} + impl Augmented for Vec {} + let val: Arc = Arc::new(vec![0u8; 1024]); + assert!(malloc_size(&*val) > 1000); + } +} diff --git a/crates/parity-util-mem/src/malloc_size.rs b/crates/parity-util-mem/src/malloc_size.rs new file mode 100644 index 0000000..73a422f --- /dev/null +++ b/crates/parity-util-mem/src/malloc_size.rs @@ -0,0 +1,908 @@ +// Copyright (c) 2022, Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +// Copyright 2016-2017 The Servo Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A crate for measuring the heap usage of data structures in a way that +//! integrates with Firefox's memory reporting, particularly the use of +//! mozjemalloc and DMD. In particular, it has the following features. +//! - It isn't bound to a particular heap allocator. +//! - It provides traits for both "shallow" and "deep" measurement, which gives +//! flexibility in the cases where the traits can't be used. +//! - It allows for measuring blocks even when only an interior pointer can be +//! 
obtained for heap allocations, e.g. `HashSet` and `HashMap`. (This relies +//! on the heap allocator having suitable support, which mozjemalloc has.) +//! - It allows handling of types like `Rc` and `Arc` by providing traits that +//! are different to the ones for non-graph structures. +//! +//! Suggested uses are as follows. +//! - When possible, use the `MallocSizeOf` trait. (Deriving support is +//! provided by the `malloc_size_of_derive` crate.) +//! - If you need an additional synchronization argument, provide a function +//! that is like the standard trait method, but with the extra argument. +//! - If you need multiple measurements for a type, provide a function named +//! `add_size_of` that takes a mutable reference to a struct that contains +//! the multiple measurement fields. +//! - When deep measurement (via `MallocSizeOf`) cannot be implemented for a +//! type, shallow measurement (via `MallocShallowSizeOf`) in combination with +//! iteration can be a useful substitute. +//! - `Rc` and `Arc` are always tricky, which is why `MallocSizeOf` is not (and +//! should not be) implemented for them. +//! - If an `Rc` or `Arc` is known to be a "primary" reference and can always +//! be measured, it should be measured via the `MallocUnconditionalSizeOf` +//! trait. +//! - If an `Rc` or `Arc` should be measured only if it hasn't been seen +//! before, it should be measured via the `MallocConditionalSizeOf` trait. +//! - Using universal function call syntax is a good idea when measuring boxed +//! fields in structs, because it makes it clear that the Box is being +//! measured as well as the thing it points to. E.g. +//! ` as MallocSizeOf>::size_of(field, ops)`. + +//! This is an extended version of the Servo internal malloc_size crate. +//! We should occasionally track the upstream changes/fixes and reintroduce them here, whenever applicable. + +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; +#[cfg(feature = "std")] +mod rstd { + pub use std::*; +} +#[cfg(not(feature = "std"))] +mod rstd { + pub use core::*; + pub mod collections { + pub use alloc::collections::*; + pub use vec_deque::VecDeque; + } +} + +#[cfg(feature = "std")] +use std::sync::Arc; + +#[cfg(not(feature = "std"))] +pub use alloc::boxed::Box; +#[cfg(not(feature = "std"))] +use core::ffi::c_void; +#[cfg(feature = "std")] +use rstd::hash::Hash; +use rstd::{ + marker::PhantomData, + mem::size_of, + ops::{Deref, DerefMut, Range}, +}; +#[cfg(feature = "std")] +use std::hash::BuildHasher; +#[cfg(feature = "std")] +use std::os::raw::c_void; + +/// A C function that takes a pointer to a heap allocation and returns its size. +pub type VoidPtrToSizeFn = unsafe extern "C" fn(ptr: *const c_void) -> usize; + +/// A closure implementing a stateful predicate on pointers. +pub type VoidPtrToBoolFnMut = dyn FnMut(*const c_void) -> bool; + +/// Operations used when measuring heap usage of data structures. +pub struct MallocSizeOfOps { + /// A function that returns the size of a heap allocation. + size_of_op: VoidPtrToSizeFn, + + /// Like `size_of_op`, but can take an interior pointer. Optional because + /// not all allocators support this operation. If it's not provided, some + /// memory measurements will actually be computed estimates rather than + /// real and accurate measurements. + enclosing_size_of_op: Option, + + /// Check if a pointer has been seen before, and remember it for next time. + /// Useful when measuring `Rc`s and `Arc`s. Optional, because many places + /// don't need it. 
+ have_seen_ptr_op: Option>, +} + +impl MallocSizeOfOps { + pub fn new( + size_of: VoidPtrToSizeFn, + malloc_enclosing_size_of: Option, + have_seen_ptr: Option>, + ) -> Self { + MallocSizeOfOps { + size_of_op: size_of, + enclosing_size_of_op: malloc_enclosing_size_of, + have_seen_ptr_op: have_seen_ptr, + } + } + + /// Check if an allocation is empty. This relies on knowledge of how Rust + /// handles empty allocations, which may change in the future. + fn is_empty(ptr: *const T) -> bool { + // The correct condition is this: + // `ptr as usize <= ::std::mem::align_of::()` + // But we can't call align_of() on a ?Sized T. So we approximate it + // with the following. 256 is large enough that it should always be + // larger than the required alignment, but small enough that it is + // always in the first page of memory and therefore not a legitimate + // address. + return ptr as *const usize as usize <= 256; + } + + /// Call `size_of_op` on `ptr`, first checking that the allocation isn't + /// empty, because some types (such as `Vec`) utilize empty allocations. + pub unsafe fn malloc_size_of(&self, ptr: *const T) -> usize { + if MallocSizeOfOps::is_empty(ptr) { + 0 + } else { + (self.size_of_op)(ptr as *const c_void) + } + } + + /// Is an `enclosing_size_of_op` available? + pub fn has_malloc_enclosing_size_of(&self) -> bool { + self.enclosing_size_of_op.is_some() + } + + /// Call `enclosing_size_of_op`, which must be available, on `ptr`, which + /// must not be empty. + pub unsafe fn malloc_enclosing_size_of(&self, ptr: *const T) -> usize { + assert!(!MallocSizeOfOps::is_empty(ptr)); + (self.enclosing_size_of_op.unwrap())(ptr as *const c_void) + } + + /// Call `have_seen_ptr_op` on `ptr`. + pub fn have_seen_ptr(&mut self, ptr: *const T) -> bool { + let have_seen_ptr_op = self + .have_seen_ptr_op + .as_mut() + .expect("missing have_seen_ptr_op"); + have_seen_ptr_op(ptr as *const c_void) + } +} + +/// Trait for measuring the "deep" heap usage of a data structure. This is the +/// most commonly-used of the traits. +pub trait MallocSizeOf { + /// Measure the heap usage of all descendant heap-allocated structures, but + /// not the space taken up by the value itself. + /// If `T::size_of` is a constant, consider implementing `constant_size` as well. + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize; + + /// Used to optimize `MallocSizeOf` implementation for collections + /// like `Vec` and `HashMap` to avoid iterating over them unnecessarily. + /// The `Self: Sized` bound is for object safety. + fn constant_size() -> Option + where + Self: Sized, + { + None + } +} + +/// Trait for measuring the "shallow" heap usage of a container. +pub trait MallocShallowSizeOf { + /// Measure the heap usage of immediate heap-allocated descendant + /// structures, but not the space taken up by the value itself. Anything + /// beyond the immediate descendants must be measured separately, using + /// iteration. + fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize; +} + +/// Like `MallocSizeOf`, but with a different name so it cannot be used +/// accidentally with derive(MallocSizeOf). For use with types like `Rc` and +/// `Arc` when appropriate (e.g. when measuring a "primary" reference). +pub trait MallocUnconditionalSizeOf { + /// Measure the heap usage of all heap-allocated descendant structures, but + /// not the space taken up by the value itself. + fn unconditional_size_of(&self, ops: &mut MallocSizeOfOps) -> usize; +} + +/// `MallocUnconditionalSizeOf` combined with `MallocShallowSizeOf`. 
+pub trait MallocUnconditionalShallowSizeOf { + /// `unconditional_size_of` combined with `shallow_size_of`. + fn unconditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize; +} + +/// Like `MallocSizeOf`, but only measures if the value hasn't already been +/// measured. For use with types like `Rc` and `Arc` when appropriate (e.g. +/// when there is no "primary" reference). +pub trait MallocConditionalSizeOf { + /// Measure the heap usage of all heap-allocated descendant structures, but + /// not the space taken up by the value itself, and only if that heap usage + /// hasn't already been measured. + fn conditional_size_of(&self, ops: &mut MallocSizeOfOps) -> usize; +} + +/// `MallocConditionalSizeOf` combined with `MallocShallowSizeOf`. +pub trait MallocConditionalShallowSizeOf { + /// `conditional_size_of` combined with `shallow_size_of`. + fn conditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize; +} + +impl<'a, T: ?Sized> MallocSizeOf for &'a T { + fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { + // Zero makes sense for a non-owning reference. + 0 + } + fn constant_size() -> Option { + Some(0) + } +} + +impl MallocSizeOf for Box { + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + self.shallow_size_of(ops) + (**self).size_of(ops) + } +} + +#[impl_trait_for_tuples::impl_for_tuples(12)] +impl MallocSizeOf for Tuple { + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + let mut result = 0; + for_tuples!( #( result += Tuple.size_of(ops); )* ); + result + } + fn constant_size() -> Option { + let mut result = Some(0); + for_tuples!( #( result = result.and_then(|s| Tuple::constant_size().map(|t| s + t)); )* ); + result + } +} + +impl MallocSizeOf for Option { + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + if let Some(val) = self.as_ref() { + val.size_of(ops) + } else { + 0 + } + } + fn constant_size() -> Option { + T::constant_size().filter(|s| *s == 0) + } +} + +impl MallocSizeOf for Result { + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + match *self { + Ok(ref x) => x.size_of(ops), + Err(ref e) => e.size_of(ops), + } + } + fn constant_size() -> Option { + // Result has constant size iff T::constant_size == E::constant_size + T::constant_size().and_then(|t| E::constant_size().filter(|e| *e == t)) + } +} + +impl MallocSizeOf for rstd::cell::Cell { + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + self.get().size_of(ops) + } + fn constant_size() -> Option { + T::constant_size() + } +} + +impl MallocSizeOf for rstd::cell::RefCell { + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + self.borrow().size_of(ops) + } + fn constant_size() -> Option { + T::constant_size() + } +} + +#[cfg(feature = "std")] +impl<'a, B: ?Sized + ToOwned> MallocSizeOf for std::borrow::Cow<'a, B> +where + B::Owned: MallocSizeOf, +{ + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + match *self { + std::borrow::Cow::Borrowed(_) => 0, + std::borrow::Cow::Owned(ref b) => b.size_of(ops), + } + } +} + +impl MallocSizeOf for [T] { + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + let mut n = 0; + if let Some(t) = T::constant_size() { + n += self.len() * t; + } else { + n = self.iter().fold(n, |acc, elem| acc + elem.size_of(ops)) + } + n + } +} + +impl MallocSizeOf for Vec { + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + let mut n = self.shallow_size_of(ops); + if let Some(t) = T::constant_size() { + n += self.len() * t; + } else { + n = self.iter().fold(n, |acc, elem| acc + elem.size_of(ops)) + } 
+ n + } +} + +impl MallocShallowSizeOf for rstd::collections::VecDeque { + fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + if ops.has_malloc_enclosing_size_of() { + if let Some(front) = self.front() { + // The front element is an interior pointer. + unsafe { ops.malloc_enclosing_size_of(&*front) } + } else { + // This assumes that no memory is allocated when the VecDeque is empty. + 0 + } + } else { + // An estimate. + self.capacity() * size_of::() + } + } +} + +impl MallocSizeOf for rstd::collections::VecDeque { + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + let mut n = self.shallow_size_of(ops); + if let Some(t) = T::constant_size() { + n += self.len() * t; + } else { + n = self.iter().fold(n, |acc, elem| acc + elem.size_of(ops)) + } + n + } +} + +#[cfg(feature = "std")] +impl MallocShallowSizeOf for std::collections::HashSet +where + T: Eq + Hash, + S: BuildHasher, +{ + fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + if ops.has_malloc_enclosing_size_of() { + // The first value from the iterator gives us an interior pointer. + // `ops.malloc_enclosing_size_of()` then gives us the storage size. + // This assumes that the `HashSet`'s contents (values and hashes) + // are all stored in a single contiguous heap allocation. + self.iter() + .next() + .map_or(0, |t| unsafe { ops.malloc_enclosing_size_of(t) }) + } else { + // An estimate. + self.capacity() * (size_of::() + size_of::()) + } + } +} + +#[cfg(feature = "std")] +impl MallocSizeOf for std::collections::HashSet +where + T: Eq + Hash + MallocSizeOf, + S: BuildHasher, +{ + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + let mut n = self.shallow_size_of(ops); + if let Some(t) = T::constant_size() { + n += self.len() * t; + } else { + n = self.iter().fold(n, |acc, elem| acc + elem.size_of(ops)) + } + n + } +} + +impl MallocSizeOf for rstd::cmp::Reverse { + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + self.0.size_of(ops) + } + fn constant_size() -> Option { + I::constant_size() + } +} + +#[cfg(feature = "std")] +impl MallocShallowSizeOf for std::collections::HashMap { + fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + // See the implementation for std::collections::HashSet for details. 
+ if ops.has_malloc_enclosing_size_of() { + self.values() + .next() + .map_or(0, |v| unsafe { ops.malloc_enclosing_size_of(v) }) + } else { + self.capacity() * (size_of::() + size_of::() + size_of::()) + } + } +} + +#[cfg(feature = "std")] +impl MallocSizeOf for std::collections::HashMap +where + K: MallocSizeOf, + V: MallocSizeOf, +{ + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + let mut n = self.shallow_size_of(ops); + if let (Some(k), Some(v)) = (K::constant_size(), V::constant_size()) { + n += self.len() * (k + v) + } else { + n = self + .iter() + .fold(n, |acc, (k, v)| acc + k.size_of(ops) + v.size_of(ops)) + } + n + } +} + +impl MallocShallowSizeOf for rstd::collections::BTreeMap { + fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + if ops.has_malloc_enclosing_size_of() { + self.values() + .next() + .map_or(0, |v| unsafe { ops.malloc_enclosing_size_of(v) }) + } else { + self.len() * (size_of::() + size_of::() + size_of::()) + } + } +} + +impl MallocSizeOf for rstd::collections::BTreeMap +where + K: MallocSizeOf, + V: MallocSizeOf, +{ + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + let mut n = self.shallow_size_of(ops); + if let (Some(k), Some(v)) = (K::constant_size(), V::constant_size()) { + n += self.len() * (k + v) + } else { + n = self + .iter() + .fold(n, |acc, (k, v)| acc + k.size_of(ops) + v.size_of(ops)) + } + n + } +} + +impl MallocShallowSizeOf for rstd::collections::BTreeSet { + fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + if ops.has_malloc_enclosing_size_of() { + // See implementation for HashSet how this works. + self.iter() + .next() + .map_or(0, |t| unsafe { ops.malloc_enclosing_size_of(t) }) + } else { + // An estimate. + self.len() * (size_of::() + size_of::()) + } + } +} + +impl MallocSizeOf for rstd::collections::BTreeSet +where + T: MallocSizeOf, +{ + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + let mut n = self.shallow_size_of(ops); + if let Some(t) = T::constant_size() { + n += self.len() * t; + } else { + n = self.iter().fold(n, |acc, elem| acc + elem.size_of(ops)) + } + n + } +} + +// XXX: we don't want MallocSizeOf to be defined for Rc and Arc. If negative +// trait bounds are ever allowed, this code should be uncommented. +// (We do have a compile-fail test for this: +// rc_arc_must_not_derive_malloc_size_of.rs) +// impl !MallocSizeOf for Arc { } +// impl !MallocShallowSizeOf for Arc { } + +#[cfg(feature = "std")] +fn arc_ptr(s: &Arc) -> *const T { + &(**s) as *const T +} + +#[cfg(feature = "std")] +impl MallocUnconditionalSizeOf for Arc { + fn unconditional_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + self.unconditional_shallow_size_of(ops) + (**self).size_of(ops) + } +} + +#[cfg(feature = "std")] +impl MallocConditionalShallowSizeOf for Arc { + fn conditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + if ops.have_seen_ptr(arc_ptr(self)) { + 0 + } else { + self.unconditional_shallow_size_of(ops) + } + } +} + +#[cfg(feature = "std")] +impl MallocConditionalSizeOf for Arc { + fn conditional_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + if ops.have_seen_ptr(arc_ptr(self)) { + 0 + } else { + self.unconditional_size_of(ops) + } + } +} + +/// If a mutex is stored directly as a member of a data type that is being measured, +/// it is the unique owner of its contents and deserves to be measured. 
+///
+/// If a mutex is stored inside of an Arc value as a member of a data type that is being measured,
+/// the Arc will not be automatically measured so there is no risk of overcounting the mutex's
+/// contents.
+///
+/// The same reasoning applies to RwLock.
+#[cfg(feature = "std")]
+impl<T: MallocSizeOf> MallocSizeOf for std::sync::Mutex<T> {
+    fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
+        self.lock().unwrap().size_of(ops)
+    }
+}
+
+#[cfg(feature = "std")]
+impl<T: MallocSizeOf> MallocSizeOf for parking_lot::Mutex<T> {
+    fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
+        self.lock().size_of(ops)
+    }
+}
+
+#[cfg(feature = "std")]
+impl<T: MallocSizeOf> MallocSizeOf for std::sync::RwLock<T> {
+    fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
+        self.read().unwrap().size_of(ops)
+    }
+}
+
+#[cfg(feature = "std")]
+impl<T: MallocSizeOf> MallocSizeOf for parking_lot::RwLock<T> {
+    fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
+        self.read().size_of(ops)
+    }
+}
+
+/// Implement notion of 0 allocation size for some type(s).
+///
+/// If used for generics, by default it will require that the generic arguments
+/// implement `MallocSizeOf`. This can be avoided by passing "any: "
+/// in front of the type list.
+///
+/// ```rust
+/// use parity_util_mem::{malloc_size, malloc_size_of_is_0};
+///
+/// struct Data<T> {
+///     phantom: std::marker::PhantomData<T>,
+/// }
+///
+/// malloc_size_of_is_0!(any: Data<T>
); +/// +/// // MallocSizeOf is NOT implemented for [u8; 333] +/// assert_eq!(malloc_size(&Data::<[u8; 333]> { phantom: std::marker::PhantomData }), 0); +/// ``` +/// +/// and when no "any: " +/// +/// ```rust +/// use parity_util_mem::{malloc_size, malloc_size_of_is_0}; +/// +/// struct Data(pub T); +/// +/// // generic argument (`T`) must be `impl MallocSizeOf` +/// malloc_size_of_is_0!(Data); +/// +/// assert_eq!(malloc_size(&Data(0u8)), 0); +/// ``` +#[macro_export] +macro_rules! malloc_size_of_is_0( + ($($ty:ty),+) => ( + $( + impl $crate::MallocSizeOf for $ty { + #[inline(always)] + fn size_of(&self, _: &mut $crate::MallocSizeOfOps) -> usize { + 0 + } + #[inline(always)] + fn constant_size() -> Option { Some(0) } + } + )+ + ); + (any: $($ty:ident<$($gen:ident),+>),+) => ( + $( + impl<$($gen),+> $crate::MallocSizeOf for $ty<$($gen),+> { + #[inline(always)] + fn size_of(&self, _: &mut $crate::MallocSizeOfOps) -> usize { + 0 + } + #[inline(always)] + fn constant_size() -> Option { Some(0) } + } + )+ + ); + ($($ty:ident<$($gen:ident),+>),+) => ( + $( + impl<$($gen: $crate::MallocSizeOf),+> $crate::MallocSizeOf for $ty<$($gen),+> { + #[inline(always)] + fn size_of(&self, _: &mut $crate::MallocSizeOfOps) -> usize { + 0 + } + #[inline(always)] + fn constant_size() -> Option { Some(0) } + } + )+ + ); +); + +malloc_size_of_is_0!(bool, char, str); +malloc_size_of_is_0!(u8, u16, u32, u64, u128, usize); +malloc_size_of_is_0!(i8, i16, i32, i64, i128, isize); +malloc_size_of_is_0!(f32, f64); + +malloc_size_of_is_0!(rstd::sync::atomic::AtomicBool); +malloc_size_of_is_0!(rstd::sync::atomic::AtomicIsize); +malloc_size_of_is_0!(rstd::sync::atomic::AtomicUsize); + +malloc_size_of_is_0!(Range, Range, Range, Range, Range); +malloc_size_of_is_0!(Range, Range, Range, Range, Range); +malloc_size_of_is_0!(Range, Range); +malloc_size_of_is_0!(any: PhantomData); + +/// Measurable that defers to inner value and used to verify MallocSizeOf implementation in a +/// struct. +#[derive(Clone)] +pub struct Measurable(pub T); + +impl Deref for Measurable { + type Target = T; + + fn deref(&self) -> &T { + &self.0 + } +} + +impl DerefMut for Measurable { + fn deref_mut(&mut self) -> &mut T { + &mut self.0 + } +} + +#[cfg(feature = "hashbrown")] +impl MallocShallowSizeOf for hashbrown::HashMap { + fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + // See the implementation for std::collections::HashSet for details. 
+ if ops.has_malloc_enclosing_size_of() { + self.values() + .next() + .map_or(0, |v| unsafe { ops.malloc_enclosing_size_of(v) }) + } else { + self.capacity() * (size_of::() + size_of::() + size_of::()) + } + } +} + +#[cfg(feature = "hashbrown")] +impl MallocSizeOf for hashbrown::HashMap +where + K: MallocSizeOf, + V: MallocSizeOf, +{ + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + let mut n = self.shallow_size_of(ops); + if let (Some(k), Some(v)) = (K::constant_size(), V::constant_size()) { + n += self.len() * (k + v) + } else { + n = self + .iter() + .fold(n, |acc, (k, v)| acc + k.size_of(ops) + v.size_of(ops)) + } + n + } +} + +#[cfg(feature = "lru")] +impl MallocSizeOf for lru::LruCache +where + K: MallocSizeOf + rstd::cmp::Eq + rstd::hash::Hash, + V: MallocSizeOf, + S: rstd::hash::BuildHasher, +{ + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + let mut n = 0; + if let (Some(k), Some(v)) = (K::constant_size(), V::constant_size()) { + n += self.len() * (k + v) + } else { + n = self + .iter() + .fold(n, |acc, (k, v)| acc + k.size_of(ops) + v.size_of(ops)) + } + n + } +} + +malloc_size_of_is_0!( + [u8; 1], [u8; 2], [u8; 3], [u8; 4], [u8; 5], [u8; 6], [u8; 7], [u8; 8], [u8; 9], [u8; 10], + [u8; 11], [u8; 12], [u8; 13], [u8; 14], [u8; 15], [u8; 16], [u8; 17], [u8; 18], [u8; 19], + [u8; 20], [u8; 21], [u8; 22], [u8; 23], [u8; 24], [u8; 25], [u8; 26], [u8; 27], [u8; 28], + [u8; 29], [u8; 30], [u8; 31], [u8; 32] +); + +macro_rules! impl_smallvec { + ($size: expr) => { + #[cfg(feature = "smallvec")] + impl MallocSizeOf for smallvec::SmallVec<[T; $size]> + where + T: MallocSizeOf, + { + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + let mut n = if self.spilled() { + self.capacity() * core::mem::size_of::() + } else { + 0 + }; + if let Some(t) = T::constant_size() { + n += self.len() * t; + } else { + n = self.iter().fold(n, |acc, elem| acc + elem.size_of(ops)) + } + n + } + } + }; +} + +impl_smallvec!(32); // kvdb uses this +impl_smallvec!(36); // trie-db uses this + +#[cfg(feature = "std")] +malloc_size_of_is_0!(std::time::Instant); +#[cfg(feature = "std")] +malloc_size_of_is_0!(std::time::Duration); + +#[cfg(all(test, feature = "std"))] // tests are using std implementations +mod tests { + use crate::{allocators::new_malloc_size_ops, MallocSizeOf, MallocSizeOfOps}; + use smallvec::SmallVec; + use std::{collections::BTreeSet, mem}; + impl_smallvec!(3); + + #[test] + fn test_smallvec_stack_allocated_type() { + let mut v: SmallVec<[u8; 3]> = SmallVec::new(); + let mut ops = new_malloc_size_ops(); + assert_eq!(v.size_of(&mut ops), 0); + v.push(1); + v.push(2); + v.push(3); + assert_eq!(v.size_of(&mut ops), 0); + assert!(!v.spilled()); + v.push(4); + assert!( + v.spilled(), + "SmallVec spills when going beyond the capacity of the inner backing array" + ); + assert_eq!(v.size_of(&mut ops), 4); // 4 u8s on the heap + } + + #[test] + fn test_smallvec_boxed_stack_allocated_type() { + let mut v: SmallVec<[Box; 3]> = SmallVec::new(); + let mut ops = new_malloc_size_ops(); + assert_eq!(v.size_of(&mut ops), 0); + v.push(Box::new(1u8)); + v.push(Box::new(2u8)); + v.push(Box::new(3u8)); + assert!(v.size_of(&mut ops) >= 3); + assert!(!v.spilled()); + v.push(Box::new(4u8)); + assert!( + v.spilled(), + "SmallVec spills when going beyond the capacity of the inner backing array" + ); + let mut ops = new_malloc_size_ops(); + let expected_min_allocs = mem::size_of::>() * 4 + 4; + assert!(v.size_of(&mut ops) >= expected_min_allocs); + } + + #[test] + fn 
test_smallvec_heap_allocated_type() {
+        let mut v: SmallVec<[String; 3]> = SmallVec::new();
+        let mut ops = new_malloc_size_ops();
+        assert_eq!(v.size_of(&mut ops), 0);
+        v.push("COW".into());
+        v.push("PIG".into());
+        v.push("DUCK".into());
+        assert!(!v.spilled());
+        assert!(v.size_of(&mut ops) >= "COW".len() + "PIG".len() + "DUCK".len());
+        v.push("ÖWL".into());
+        assert!(v.spilled());
+        let mut ops = new_malloc_size_ops();
+        let expected_min_allocs =
+            mem::size_of::<String>() * 4 + "ÖWL".len() + "COW".len() + "PIG".len() + "DUCK".len();
+        assert!(v.size_of(&mut ops) >= expected_min_allocs);
+    }
+
+    #[test]
+    fn test_large_vec() {
+        const N: usize = 128 * 1024 * 1024;
+        let val = vec![1u8; N];
+        let mut ops = new_malloc_size_ops();
+        assert!(val.size_of(&mut ops) >= N);
+        assert!(val.size_of(&mut ops) < 2 * N);
+    }
+
+    #[test]
+    fn btree_set() {
+        let mut set = BTreeSet::new();
+        for t in 0..100 {
+            set.insert(vec![t]);
+        }
+        // ~36 per value
+        assert!(crate::malloc_size(&set) > 3000);
+    }
+
+    #[test]
+    fn special_malloc_size_of_0() {
+        struct Data<T> {
+            phantom: std::marker::PhantomData<T>,
+        }
+
+        malloc_size_of_is_0!(any: Data<T>
); + + // MallocSizeOf is not implemented for [u8; 333] + assert_eq!( + crate::malloc_size(&Data::<[u8; 333]> { + phantom: std::marker::PhantomData + }), + 0 + ); + } + + #[test] + fn constant_size() { + struct AlwaysTwo(Vec); + + impl MallocSizeOf for AlwaysTwo { + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + self.0.size_of(ops) + } + fn constant_size() -> Option { + Some(2) + } + } + + assert_eq!(AlwaysTwo::constant_size(), Some(2)); + assert_eq!(std::cmp::Reverse::::constant_size(), Some(0)); + assert_eq!(std::cell::RefCell::::constant_size(), Some(0)); + assert_eq!(std::cell::Cell::::constant_size(), Some(0)); + assert_eq!(Result::<(), ()>::constant_size(), Some(0)); + assert_eq!( + <(AlwaysTwo, (), [u8; 32], AlwaysTwo)>::constant_size(), + Some(2 + 2) + ); + assert_eq!(Option::::constant_size(), Some(0)); + assert_eq!(<&String>::constant_size(), Some(0)); + + assert_eq!(::constant_size(), None); + assert_eq!(std::borrow::Cow::::constant_size(), None); + assert_eq!(Result::<(), String>::constant_size(), None); + assert_eq!(Option::::constant_size(), None); + } +} diff --git a/crates/parity-util-mem/src/memory_stats_noop.rs b/crates/parity-util-mem/src/memory_stats_noop.rs new file mode 100644 index 0000000..202ac90 --- /dev/null +++ b/crates/parity-util-mem/src/memory_stats_noop.rs @@ -0,0 +1,34 @@ +// Copyright (c) 2022, Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +// Copyright 2021 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#[derive(Clone, Debug)] +pub struct Unimplemented; +pub use Unimplemented as Error; + +#[cfg(feature = "std")] +impl std::fmt::Display for Unimplemented { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + fmt.write_str("unimplemented") + } +} + +#[derive(Clone)] +pub struct MemoryAllocationTracker {} + +impl MemoryAllocationTracker { + pub fn new() -> Result { + Err(Error) + } + + pub fn snapshot(&self) -> Result { + unimplemented!(); + } +} diff --git a/crates/parity-util-mem/src/sizeof.rs b/crates/parity-util-mem/src/sizeof.rs new file mode 100644 index 0000000..aae5d58 --- /dev/null +++ b/crates/parity-util-mem/src/sizeof.rs @@ -0,0 +1,56 @@ +// Copyright (c) 2022, Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Estimation for heapsize calculation. Usable to replace call to allocator method (for some +//! allocators or simply because we just need a deterministic cunsumption measurement). 
+ +use crate::malloc_size::{ + MallocShallowSizeOf, MallocSizeOf, MallocSizeOfOps, MallocUnconditionalShallowSizeOf, +}; +#[cfg(not(feature = "std"))] +use alloc::boxed::Box; +#[cfg(not(feature = "std"))] +use alloc::string::String; +#[cfg(not(feature = "std"))] +use alloc::sync::Arc; +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; +#[cfg(not(feature = "std"))] +use core::mem::{size_of, size_of_val}; + +#[cfg(feature = "std")] +use std::mem::{size_of, size_of_val}; +#[cfg(feature = "std")] +use std::sync::Arc; + +impl MallocShallowSizeOf for Box { + fn shallow_size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { + size_of_val(&**self) + } +} + +impl MallocSizeOf for String { + fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { + self.capacity() * size_of::() + } +} + +impl MallocShallowSizeOf for Vec { + fn shallow_size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { + self.capacity() * size_of::() + } +} + +impl MallocUnconditionalShallowSizeOf for Arc { + fn unconditional_shallow_size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { + size_of::() + } +} diff --git a/crates/parity-util-mem/tests/derive.rs b/crates/parity-util-mem/tests/derive.rs new file mode 100644 index 0000000..66bcd70 --- /dev/null +++ b/crates/parity-util-mem/tests/derive.rs @@ -0,0 +1,94 @@ +// Copyright (c) 2022, Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![cfg(feature = "std")] + +use parity_util_mem::{MallocSizeOf, MallocSizeOfExt}; + +#[test] +fn derive_vec() { + #[derive(MallocSizeOf)] + struct Trivia { + v: Vec, + } + + let t = Trivia { v: vec![0u8; 1024] }; + + assert!(t.malloc_size_of() > 1000); +} + +#[test] +fn derive_hashmap() { + #[derive(MallocSizeOf, Default)] + struct Trivia { + hm: std::collections::HashMap>, + } + + let mut t = Trivia::default(); + + t.hm.insert(1, vec![0u8; 2048]); + + assert!(t.malloc_size_of() > 2000); +} + +#[test] +fn derive_ignore() { + #[derive(MallocSizeOf, Default)] + struct Trivia { + hm: std::collections::HashMap>, + #[ignore_malloc_size_of = "I don't like vectors"] + v: Vec, + } + + let mut t = Trivia::default(); + + t.hm.insert(1, vec![0u8; 2048]); + t.v = vec![0u8; 1024]; + assert!(t.malloc_size_of() < 3000); +} + +#[test] +#[cfg(all(feature = "lru", feature = "hashbrown"))] +fn derive_morecomplex() { + #[derive(MallocSizeOf)] + struct Trivia { + hm: hashbrown::HashMap>, + cache: lru::LruCache>, + } + + let mut t = Trivia { + hm: hashbrown::HashMap::new(), + cache: lru::LruCache::unbounded(), + }; + + t.hm.insert(1, vec![0u8; 2048]); + t.cache.put(1, vec![0u8; 2048]); + t.cache.put(2, vec![0u8; 4096]); + + assert!(t.malloc_size_of() > 8000); +} + +#[test] +fn derive_tuple() { + #[derive(MallocSizeOf)] + struct Trivia { + tp1: (), + tp2: (Vec, Vec), + } + + let t = Trivia { + tp1: (), + tp2: (vec![7u8; 1024], vec![9u8; 1024]), + }; + + assert!(t.malloc_size_of() > 2000); + assert!(t.malloc_size_of() < 3000); +}
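For reviewers: a minimal, self-contained sketch of how downstream code is expected to consume these crates, mirroring the usage exercised in crates/parity-util-mem/tests/derive.rs. The `Example` struct, its fields, and the `main` function below are illustrative only and are not part of this diff.

// Illustrative usage only (not part of the diff); `Example` and its fields are hypothetical.
use parity_util_mem::{MallocSizeOf, MallocSizeOfExt};

#[derive(MallocSizeOf, Default)]
struct Example {
    values: Vec<u8>,
    index: std::collections::HashMap<u64, Vec<u8>>,
    // Fields can be excluded from measurement, but the attribute requires a reason.
    #[ignore_malloc_size_of = "scratch space, not interesting to report"]
    scratch: Vec<u8>,
}

fn main() {
    let mut e = Example::default();
    e.values = vec![0u8; 1024];
    e.index.insert(1, vec![0u8; 2048]);
    e.scratch = vec![0u8; 4096];
    // Roughly 1024 + 2048 bytes plus map overhead; the ignored field contributes nothing.
    // With the default `estimate-heapsize` feature this is a capacity-based estimate
    // (see src/sizeof.rs) rather than a query to the system allocator.
    println!("approximate heap usage: {} bytes", e.malloc_size_of());
}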