From 380fa26e216e57f127a0b95460317906e3603648 Mon Sep 17 00:00:00 2001 From: Tin Svagelj Date: Mon, 28 Aug 2023 04:49:23 +0200 Subject: [PATCH] Initial commit Signed-off-by: Tin Svagelj --- .gitignore | 3 + Cargo.toml | 26 + LICENSE_APACHE | 176 +++++++ LICENSE_MIT | 21 + LICENSE_ZLIB | 17 + README.md | 66 +++ src/error.rs | 99 ++++ src/lib.rs | 1268 ++++++++++++++++++++++++++++++++++++++++++++++++ 8 files changed, 1676 insertions(+) create mode 100644 .gitignore create mode 100644 Cargo.toml create mode 100644 LICENSE_APACHE create mode 100644 LICENSE_MIT create mode 100644 LICENSE_ZLIB create mode 100644 README.md create mode 100644 src/error.rs create mode 100644 src/lib.rs diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..80ddb49 --- /dev/null +++ b/.gitignore @@ -0,0 +1,3 @@ +debug/ +target/ +Cargo.lock diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000..b0e4796 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "contiguous-mem" +version = "0.1.0" +edition = "2021" +description = "A contiguous memory storage" +authors = ["Tin Švagelj "] +license = "MIT OR Apache-2.0 OR Zlib" +keywords = ["memory", "contiguous", "storage", "container", "nostd"] +categories = [ + "data-structures", + "memory-management", + "game-engines", + "game-development", + "no-std", +] +repository = "https://github.com/Caellian/contiguous_mem" + +[dependencies] +portable-atomic = { version = "1", default-features = false } +spin = { version = "0.9", optional = true } + +[features] +default = ["std"] +std = ["portable-atomic/std"] +no_std = ["dep:spin"] +debug = [] diff --git a/LICENSE_APACHE b/LICENSE_APACHE new file mode 100644 index 0000000..d9a10c0 --- /dev/null +++ b/LICENSE_APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/LICENSE_MIT b/LICENSE_MIT new file mode 100644 index 0000000..7ff07df --- /dev/null +++ b/LICENSE_MIT @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Tin Švagelj + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/LICENSE_ZLIB b/LICENSE_ZLIB new file mode 100644 index 0000000..c0afaab --- /dev/null +++ b/LICENSE_ZLIB @@ -0,0 +1,17 @@ +Copyright (c) 2023 Tin Švagelj + +This software is provided 'as-is', without any express or implied +warranty. In no event will the authors be held liable for any damages +arising from the use of this software. + +Permission is granted to anyone to use this software for any purpose, +including commercial applications, and to alter it and redistribute it +freely, subject to the following restrictions: + +1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. 
If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. +2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. +3. This notice may not be removed or altered from any source distribution. diff --git a/README.md b/README.md new file mode 100644 index 0000000..397c745 --- /dev/null +++ b/README.md @@ -0,0 +1,66 @@ +# contiguous_mem + +Contiguous memory is memory that is allocated in one contiguous block. +Designed for both standard and no_std environments, this library ensures efficient memory allocation while being simple and (somewhat) safe to use. + +## Key Features + +- Type Agnostic: Support for various data types, including mixed types within the same container. + +- Multiple Implementations: Choose from specialized strategies to match your requirements: + - SyncContiguousMemory (ThreadSafeImpl): Enables asynchronous data access, ensuring safety in concurrent scenarios. + - GrowableContiguousMemory (NotThreadSafeImpl): Synchronous, mutex-free implementation for speed and dynamic resizing. + - FixedContiguousMemory (FixedSizeImpl): Highly optimized but unsafe for precise sizing and long-lived references. 
+
+## Getting Started
+
+Add the crate to your dependencies:
+
+```toml
+[dependencies]
+contiguous_mem = "0.1.0"
+```
+
+Optionally disable the `std` feature to use in `no_std` environment:
+
+```toml
+[dependencies]
+contiguous_mem = { version = "0.1.0", default-features = false }
+```
+
+### Example usage
+
+```rust
+use contiguous_mem::GrowableContiguousMemory;
+
+struct Data {
+    value: u32,
+}
+
+fn main() {
+    // Create a ContiguousMemory instance with a capacity of 1024 bytes and 8-byte alignment
+    let mut memory = GrowableContiguousMemory::new(1024, 8).unwrap();
+
+    // Store data in the memory container
+    let data = Data { value: 42 };
+    let stored_number = memory.store(22u64).unwrap();
+    let stored_data = memory.store(data).unwrap();
+
+    // Retrieve and use the stored data
+    let retrieved_data = stored_data.get().unwrap();
+    println!("Retrieved data: {}", retrieved_data.value);
+    let retrieved_number = stored_number.get().unwrap();
+    println!("Retrieved number: {}", retrieved_number);
+}
+```
+
+## Contributions
+
+Contributions are welcome; feel free to create an issue or a pull request.
+
+All contributions to the project are licensed under the zlib/MIT/Apache 2.0 license unless you explicitly state otherwise.
+
+## License
+
+This project is licensed under [Zlib](./LICENSE_ZLIB), [MIT](./LICENSE_MIT), or
+[Apache-2.0](./LICENSE_APACHE) license, choose whichever suits you most.
diff --git a/src/error.rs b/src/error.rs
new file mode 100644
index 0000000..76885af
--- /dev/null
+++ b/src/error.rs
@@ -0,0 +1,99 @@
+#[cfg(feature = "no_std")]
+use alloc::alloc::LayoutError;
+#[cfg(feature = "std")]
+use std::alloc::LayoutError;
+
+/// Represents errors that can occur while using the [`ContiguousMemory`](crate::ContiguousMemory) container.
+#[derive(Debug)]
+pub enum ContiguousMemoryError {
+    /// Tried to store data that does not fit into any of the remaining free memory regions.
+ NoStorageLeft, + /// Attempted to occupy a memory region that is already marked as taken. + AlreadyUsed, + /// Attempted to operate on a memory region that is not contained within the [`AllocationTracker`](crate::AllocationTracker). + NotContained, + /// Attempted to free memory that has already been deallocated. + DoubleFree, + /// The [`AllocationTracker`](crate::AllocationTracker) does not allow shrinking to the expected size. + Unshrinkable { + /// The minimum required size for shrinking the [`ContiguousMemory`](crate::ContiguousMemory) container. + min_required: usize, + }, + /// Indicates that a mutex containing the base memory offset or the [`AllocationTracker`](crate::AllocationTracker) was poisoned. + Poisoned { + /// Specifies which component was poisoned: the base memory offset or the [`AllocationTracker`](crate::AllocationTracker). + which: PoisonedMutex, + }, + /// Attempted to borrow the [`AllocationTracker`](crate::AllocationTracker) which is already in use. + TrackerInUse, + /// Indicates that the provided [`Layout`](std::alloc::Layout) is invalid. + Layout { + /// The underlying error that caused the [`Layout`](std::alloc::Layout) to be considered invalid. + source: LayoutError, + }, +} + +/// Represents possible poisoning sources for mutexes in [`ContiguousMemoryError::Poisoned`]. +#[derive(Debug)] +pub enum PoisonedMutex { + /// Mutex containing the base memory offset was poisoned. + BaseAddress, + /// [`AllocationTracker`] mutex was poisoned. 
+ AllocationTracker, +} + +#[cfg(feature = "std")] +impl std::fmt::Display for ContiguousMemoryError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + ContiguousMemoryError::NoStorageLeft => { + write!(f, "Insufficient free storage available") + } + ContiguousMemoryError::NotContained => { + write!(f, "Attempted to mark a memory region that isn't contained") + } + ContiguousMemoryError::AlreadyUsed => write!( + f, + "Attempted to take a memory region that is already marked as occupied" + ), + ContiguousMemoryError::DoubleFree => write!( + f, + "Attempted to free a memory region that is already marked as free" + ), + ContiguousMemoryError::Unshrinkable { min_required } => write!( + f, + "Cannot shrink memory regions; minimum required space: {} bytes", + min_required + ), + ContiguousMemoryError::Poisoned { which } => match which { + PoisonedMutex::BaseAddress => { + write!(f, "Cannot acquire lock: base address Mutex was poisoned") + } + PoisonedMutex::AllocationTracker => write!( + f, + "Cannot acquire lock: AllocationTracker Mutex was poisoned" + ), + }, + ContiguousMemoryError::TrackerInUse => { + write!(f, "Cannot borrow AllocationTracker: it is already in use") + } + ContiguousMemoryError::Layout { source } => write!(f, "Layout error: {}", source), + } + } +} + +#[cfg(feature = "std")] +impl std::error::Error for ContiguousMemoryError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + ContiguousMemoryError::Layout { source } => Some(source), + _ => None, + } + } +} + +impl From for ContiguousMemoryError { + fn from(layout_err: LayoutError) -> Self { + ContiguousMemoryError::Layout { source: layout_err } + } +} diff --git a/src/lib.rs b/src/lib.rs new file mode 100644 index 0000000..2e8d1ed --- /dev/null +++ b/src/lib.rs @@ -0,0 +1,1268 @@ +#![cfg_attr(feature = "no_std", no_std)] + +#[cfg(feature = "no_std")] +extern crate alloc; + +#[cfg(any( + all(not(feature = "std"), not(feature = 
"no_std")), + all(feature = "std", feature = "no_std") +))] +compile_error!( + "contiguous_mem requires either 'std' or 'no_std' feature to be enabled, not both or neither" +); + +pub mod error; + +use core::{ + cell::{Cell, RefCell}, + marker::PhantomData, + mem::size_of, + ptr::{null_mut, write_unaligned}, +}; + +#[cfg(feature = "std")] +mod std_imports { + pub use std::alloc::{Layout, LayoutError}; + + pub use std::rc::Rc; + pub use std::sync::Arc; + pub use std::sync::Mutex; + pub use std::sync::MutexGuard; + + pub use std::alloc::alloc as allocate; + pub use std::alloc::dealloc as deallocate; + pub use std::alloc::realloc as reallocate; +} +#[cfg(feature = "std")] +use std_imports::*; + +#[cfg(feature = "no_std")] +mod nostd_imports { + pub use alloc::alloc::{Layout, LayoutError}; + pub use alloc::vec::Vec; + + pub use alloc::rc::Rc; + pub use alloc::sync::Arc; + pub use spin::Mutex; + pub use spin::MutexGuard; + + pub use alloc::alloc::alloc as allocate; + pub use alloc::alloc::dealloc as deallocate; + pub use alloc::alloc::realloc as reallocate; +} +#[cfg(feature = "no_std")] +use nostd_imports::*; + +use portable_atomic::AtomicUsize; + +use error::{ContiguousMemoryError, PoisonedMutex}; + +/// Trait that adds a method which mimics std `Result::map_err` on a Lock in order to unify +/// no_std and std environments. +/// +/// This is necessary as [spin::Mutex::lock] doesn't return a Result but a [MutexGuard] +/// directly. +trait LockOr { + fn lock_or F>(&self, op: O) -> Result, F>; +} +#[cfg(feature = "std")] +impl LockOr for Mutex { + fn lock_or F>(&self, op: O) -> Result, F> { + self.lock().map_err(|_| op()) + } +} +#[cfg(feature = "no_std")] +impl LockOr for Mutex { + fn lock_or F>(&self, op: O) -> Result, F> { + Ok(self.lock()) + } +} + +/// Represents a range of bytes in [`AllocationTracker`] and [`ContiguousMemory`]. 
+#[derive(Clone, Copy, PartialEq, Eq)] +#[cfg_attr(any(feature = "debug", test), derive(Debug))] +pub struct ByteRange( + /// **Inclusive** lower bound of this byte range. + usize, + /// **Exclusive** upper bound of this byte range. + usize, +); +impl ByteRange { + /// Constructs a new byte range, ensuring that `from` and `to` are ordered correctly. + /// + /// # Arguments + /// + /// * `from` - The inclusive lower bound of the byte range. + /// * `to` - The exclusive upper bound of the byte range. + pub fn new(from: usize, to: usize) -> Self { + ByteRange(from.min(to), to.max(from)) + } + + /// Constructs a new byte range without checking `from` and `to` ordering. + /// + /// # Arguments + /// + /// * `from` - The inclusive lower bound of the byte range. + /// * `to` - The exclusive upper bound of the byte range. + pub fn new_unchecked(from: usize, to: usize) -> Self { + ByteRange(from, to) + } + + /// Aligns this byte range to the provided `alignment`. + /// + /// # Arguments + /// + /// * `alignment` - The alignment to which the byte range should be aligned. + pub fn aligned(&self, alignment: usize) -> Self { + let offset = (self.0 as *const u8).align_offset(alignment); + ByteRange(self.0 + offset, self.1) + } + + /// Caps the size of this byte range to the provided `size` and returns it. + /// If the size of this byte range is lesser than the required size, `None` is returned instead. + /// + /// # Arguments + /// + /// * `size` - The size to cap the byte range to. + pub fn cap_size(&self, size: usize) -> Option { + if self.len() < size { + return None; + } + Some(ByteRange(self.0, self.0 + size)) + } + + /// Offsets this byte range by a provided unsigned offset. + /// + /// # Arguments + /// + /// * `offset` - The unsigned offset to add to both lower and upper bounds of the byte range. + pub fn offset(&self, offset: usize) -> Self { + ByteRange(self.0 + offset, self.1 + offset) + } + + /// Offsets this byte range by a provided signed offset. 
+ /// + /// # Arguments + /// + /// * `offset` - The signed offset to add to both lower and upper bounds of the byte range. + pub fn offset_signed(&self, offset: isize) -> Self { + ByteRange( + ((self.0 as isize).wrapping_add(offset)) as usize, + ((self.1 as isize).wrapping_add(offset)) as usize, + ) + } + + /// Returns length of this byte range. + pub fn len(&self) -> usize { + self.1 - self.0 + } + + /// Returns `true` if this byte range contains another byte range `other`. + /// + /// # Arguments + /// + /// * `other` - The other byte range to check for containment. + pub fn contains(&self, other: Self) -> bool { + self.0 <= other.0 && other.1 <= self.1 + } + + /// Returns two byte ranges that remain when another `other` range is removed from this one. + /// + /// It is possible for either or both of the returned byte ranges to have a length of 0 if `other` is aligned with + /// either the upper or lower bound of this range, or if it is equal to this range. + /// + /// # Arguments + /// + /// * `other` - The byte range to remove from this range. + pub fn difference_unchecked(&self, other: Self) -> (Self, Self) { + (ByteRange(self.0, other.0), ByteRange(other.1, self.1)) + } + + /// Merges this byte range with `other` and returns a byte range that contains both. + /// + /// # Arguments + /// + /// * `other` - The other byte range to merge with this one. + pub fn merge_unchecked(&self, other: Self) -> Self { + ByteRange(self.0.min(other.0), self.1.max(other.1)) + } + + /// Merges another `other` byte range into this one, resulting in a byte range that contains both. + /// + /// # Arguments + /// + /// * `other` - The other byte range to merge into this one. + pub fn merge_in_unchecked(&mut self, other: Self) { + self.0 = self.0.min(other.0); + self.1 = self.1.max(other.1); + } +} + +/// A structure that keeps track of unused regions of memory within provided bounds. 
+#[derive(Clone)] +#[cfg_attr(feature = "debug", derive(Debug))] +pub struct AllocationTracker { + size: usize, + unused: Vec, +} +impl AllocationTracker { + /// Constructs a new `AllocationTracker` of the provided `size`. + /// + /// # Arguments + /// + /// * `size` - The total size of the memory region that will be tracked. + pub fn new(size: usize) -> Self { + let mut initial = Vec::new(); + initial.push(ByteRange(0, size)); + AllocationTracker { + size, + unused: initial, + } + } + + /// Returns the total memory size being tracked. + pub fn len(&self) -> usize { + self.size + } + + /// Checks if there is no empty space left in the tracked region. + pub fn is_empty(&self) -> bool { + self.unused.is_empty() + } + + /// Returns a [`ByteRange`] encompassing the entire tracked memory region. + pub fn whole_range(&self) -> ByteRange { + ByteRange(0, self.size) + } + + /// Tries resizing the available memory range represented by this structure to the + /// provided `new_size`. + /// + /// # Arguments + /// + /// * `new_size` - The desired new size of the memory region. + /// + /// # Returns + /// + /// A `Result` indicating success or a `ContiguousMemoryError` if an error occurs. + /// + /// # Errors + /// + /// This function can return the following errors: + /// + /// - [`ContiguousMemoryError::Unshrinkable`]: If the remaining free regions cannot be shrunk to the desired size. 
+ pub fn resize(&mut self, new_size: usize) -> Result<(), ContiguousMemoryError> { + if new_size == self.size { + return Ok(()); + } else if new_size < self.size { + let last = self + .unused + .last_mut() + .ok_or(ContiguousMemoryError::Unshrinkable { + min_required: self.size, + })?; + + let reduction = self.size - new_size; + if last.len() < reduction { + return Err(ContiguousMemoryError::Unshrinkable { + min_required: self.size - last.len(), + }); + } + last.1 -= reduction; + self.size = new_size; + } else { + match self.unused.last() { + Some(it) => { + // check whether the last free region ends at the end of tracked region + if it.1 == self.size { + let last = self + .unused + .last_mut() + .expect("free byte ranges isn't empty"); + last.1 = new_size; + } else { + self.unused.push(ByteRange(self.size, new_size)); + } + } + None => { + self.unused.push(ByteRange(self.size, new_size)); + } + } + self.size = new_size; + } + Ok(()) + } + + /// Returns the next free memory region that can accommodate the given type [`Layout`]. + /// + /// If the `layout` cannot be safely stored within any free segments of the represented memory region, `None` is returned. + /// + /// # Arguments + /// + /// * `layout` - The layout of the data to be stored. + /// + /// # Returns + /// + /// An optional [`ByteRange`] representing the next available memory region, or `None` if no suitable region is found. + pub fn peek_next(&self, layout: Layout) -> Option { + if layout.size() > self.size { + return None; + } + + let available = self.unused.iter().find(|it| { + it.len() >= layout.size() && it.aligned(layout.align()).len() >= layout.size() + })?; + + let usable = available.aligned(layout.align()).cap_size(layout.size())?; + + Some(usable) + } + + /// Tries marking the provided memory region as not free. + /// + /// # Arguments + /// + /// * `region` - The memory region to mark as not free. 
+    ///
+    /// # Returns
+    ///
+    /// A `Result` indicating success or a `ContiguousMemoryError` if an error occurs.
+    ///
+    /// # Errors
+    ///
+    /// This function can return the following errors:
+    ///
+    /// - [`ContiguousMemoryError::NotContained`]: If the provided region falls outside of the memory tracked by the `AllocationTracker`.
+    /// - [`ContiguousMemoryError::AlreadyUsed`]: If the provided region isn't free.
+    pub fn take(&mut self, region: ByteRange) -> Result<(), ContiguousMemoryError> {
+        // NOTE: the guard must be negated — a region is an error only when it is
+        // NOT contained in the tracked range (mirrors the check in `release`).
+        if !self.whole_range().contains(region) {
+            return Err(ContiguousMemoryError::NotContained);
+        }
+
+        // Locate the free range that fully covers the requested region; if none
+        // does, some part of it is already occupied.
+        let (i, found) = self
+            .unused
+            .iter()
+            .enumerate()
+            .find(|(_, it)| it.contains(region))
+            .ok_or(ContiguousMemoryError::AlreadyUsed)?;
+
+        // Split the covering free range around the taken region, keeping any
+        // non-empty leftovers in order.
+        let (left, right) = found.difference_unchecked(region);
+
+        if left.len() > 0 {
+            self.unused[i] = left;
+            if right.len() > 0 {
+                self.unused.insert(i + 1, right);
+            }
+        } else if right.len() > 0 {
+            self.unused[i] = right;
+        } else {
+            self.unused.remove(i);
+        }
+
+        Ok(())
+    }
+
+    /// Takes the next available memory region that can hold the provided `layout`.
+    ///
+    /// On success, it returns a [`ByteRange`] of the memory region that was taken.
+    ///
+    /// # Arguments
+    ///
+    /// * `layout` - The layout of the data to be stored.
+    ///
+    /// # Returns
+    ///
+    /// A `Result` indicating success with the allocated [`ByteRange`] or a `ContiguousMemoryError` if an error occurs.
+    ///
+    /// # Errors
+    ///
+    /// This function can return the following errors:
+    ///
+    /// - [`ContiguousMemoryError::NoStorageLeft`]: If the requested [`Layout`] cannot be fitted within any free memory regions.
+ pub fn take_next(&mut self, layout: Layout) -> Result { + if layout.size() > self.size { + return Err(ContiguousMemoryError::NoStorageLeft); + } + + let (i, available) = self + .unused + .iter() + .enumerate() + .find(|(_, it)| { + it.len() >= layout.size() && it.aligned(layout.align()).len() >= layout.size() + }) + .ok_or(ContiguousMemoryError::NoStorageLeft)?; + + let taken = available + .aligned(layout.align()) + .cap_size(layout.size()) + .ok_or(ContiguousMemoryError::NoStorageLeft)?; + + let (left, right) = available.difference_unchecked(taken); + + if left.len() > 0 { + self.unused[i] = left; + if right.len() > 0 { + self.unused.insert(i + 1, right); + } + } else if right.len() > 0 { + self.unused[i] = right; + } else { + self.unused.remove(i); + } + + Ok(taken) + } + + /// Tries marking the provided memory region as free. + /// + /// # Arguments + /// + /// * `region` - The memory region to mark as free. + /// + /// # Returns + /// + /// A `Result` indicating success or a `ContiguousMemoryError` if an error occurs. + /// + /// # Errors + /// + /// This function can return the following error: + /// + /// - [`ContiguousMemoryError::NotContained`]: If the provided region falls outside of the memory tracked by the `AllocationTracker`. + pub fn release(&mut self, region: ByteRange) -> Result<(), ContiguousMemoryError> { + if !self.whole_range().contains(region) { + return Err(ContiguousMemoryError::NotContained); + } + + if let Some(found) = self + .unused + .iter_mut() + .find(|it| region.1 == it.0 || it.1 == region.0 || it.contains(region)) + { + if found.contains(region) { + return Err(ContiguousMemoryError::DoubleFree); + } + found.merge_in_unchecked(region); + } else { + if let Some((i, _)) = self.unused.iter().enumerate().find(|it| it.0 > region.0) { + self.unused.insert(i, region); + } else { + self.unused.push(region); + } + } + + Ok(()) + } +} + +/// A trait defining the implementation details required by [`ContiguousMemory`]. 
+pub trait ImplDetails { + /// The type representing the base memory and allocation tracking. + type Base: Clone; + + /// The type representing the allocation tracking mechanism. + type AllocationTracker: Clone; + + /// The type representing the result of an allocation operation. + type AllocResult; + + /// The type representing the usage counter in [`crate::Ref`] type. + type UseCounter: Clone; + + /// Indicates whether the container can grow when out of memory. + const CAN_GROW: bool = true; + + /// Indicates whether locks are used for synchronization. + const USE_LOCKS: bool = false; + + /// Creates a new instance of the base type from a raw pointer. + fn new_base(value: *mut u8) -> Self::Base; + + /// Retrieves the base pointer from the base instance. + fn get_base(base: &Self::Base) -> Result<*mut u8, ContiguousMemoryError>; + + /// Resizes and reallocates the base memory according to new capacity. + fn reallocate( + base: &mut Self::Base, + layout: &mut Layout, + new_capacity: usize, + ) -> Result<*mut u8, ContiguousMemoryError>; + + /// Deallocates the base memory using layout information. + fn deallocate(base: &Self::Base, layout: Layout); + + /// Creates a new allocation tracker for the specified capacity. + fn new_allocation_tacker(capacity: usize) -> Self::AllocationTracker; + + /// Resizes the allocation tracker to the new capacity. + fn resize_tracker( + tracker: &mut Self::AllocationTracker, + new_capacity: usize, + ) -> Result<(), ContiguousMemoryError>; + + /// Finds the next free memory region for given layout in the tracker. + fn next_free( + tracker: &mut Self::AllocationTracker, + layout: Layout, + ) -> Result; + + /// Releases the specified memory range back to the allocation tracker. + fn release_range( + tracker: &mut Self::AllocationTracker, + range: ByteRange, + ) -> Result<(), ContiguousMemoryError>; + + /// Builds a reference for the stored data. 
+ fn build_ref( + base: &Self::Base, + addr: *mut T, + range: &ByteRange, + tracker: &Self::AllocationTracker, + ) -> Self::AllocResult; + + /// Increments the usage counter for the reference. + fn bump_ref(counter: &Self::UseCounter); + + /// Decrements the usage counter and returns `true` if it reaches zero. + fn drop_ref(counter: &mut Self::UseCounter) -> bool; +} + +/// A marker struct representing the behavior specialization for thread-safe operations within +/// [`ContiguousMemory`](crate::ContiguousMemory). This implementation ensures that the container's +/// operations can be used safely in asynchronous contexts, utilizing mutexes to prevent data races. +pub struct ThreadSafeImpl; +impl ImplDetails for ThreadSafeImpl { + type Base = Arc>; + type AllocationTracker = Arc>; + type AllocResult = Ref; + type UseCounter = Arc; + + const USE_LOCKS: bool = true; + + #[inline(always)] + fn new_base(value: *mut u8) -> Self::Base { + Arc::new(Mutex::new(value)) + } + + #[inline(always)] + fn get_base(base: &Self::Base) -> Result<*mut u8, ContiguousMemoryError> { + base.lock_or(|| ContiguousMemoryError::Poisoned { + which: PoisonedMutex::BaseAddress, + }) + .map(|result| *result) + } + + #[inline(always)] + fn reallocate( + base: &mut Self::Base, + layout: &mut Layout, + new_capacity: usize, + ) -> Result<*mut u8, ContiguousMemoryError> { + let mut lock = base.lock_or(|| ContiguousMemoryError::Poisoned { + which: PoisonedMutex::BaseAddress, + })?; + *lock = unsafe { reallocate(*lock, *layout, new_capacity) }; + *layout = Layout::from_size_align(new_capacity, layout.align())?; + Ok(*lock) + } + + #[inline(always)] + fn deallocate(base: &Self::Base, layout: Layout) { + if let Ok(mut lock) = base.lock_or(|| ContiguousMemoryError::Poisoned { + which: PoisonedMutex::BaseAddress, + }) { + unsafe { deallocate(*lock, layout) }; + *lock = null_mut(); + } + } + + #[inline(always)] + fn new_allocation_tacker(capacity: usize) -> Self::AllocationTracker { + 
Arc::new(Mutex::new(AllocationTracker::new(capacity))) + } + + #[inline(always)] + fn resize_tracker( + tracker: &mut Self::AllocationTracker, + new_capacity: usize, + ) -> Result<(), ContiguousMemoryError> { + let mut lock = tracker.lock_or(|| ContiguousMemoryError::Poisoned { + which: PoisonedMutex::AllocationTracker, + })?; + lock.resize(new_capacity)?; + Ok(()) + } + + #[inline(always)] + fn next_free( + tracker: &mut Self::AllocationTracker, + layout: Layout, + ) -> Result { + let mut lock = tracker.lock_or(|| ContiguousMemoryError::Poisoned { + which: PoisonedMutex::AllocationTracker, + })?; + lock.take_next(layout) + } + + #[inline(always)] + fn release_range( + tracker: &mut Self::AllocationTracker, + range: ByteRange, + ) -> Result<(), ContiguousMemoryError> { + let mut lock = tracker.lock_or(|| ContiguousMemoryError::Poisoned { + which: PoisonedMutex::AllocationTracker, + })?; + lock.release(range) + } + + #[inline(always)] + fn build_ref( + base: &Self::Base, + _addr: *mut T, + range: &ByteRange, + tracker: &Self::AllocationTracker, + ) -> Self::AllocResult { + Ref { + base: base.clone(), + range: range.clone(), + tracker: tracker.clone(), + uses: Arc::new(AtomicUsize::new(1)), + _phantom: PhantomData, + } + } + + #[inline(always)] + fn bump_ref(counter: &Self::UseCounter) { + counter.add(1, portable_atomic::Ordering::Relaxed) + } + + #[inline(always)] + fn drop_ref(counter: &mut Self::UseCounter) -> bool { + counter.sub(1, portable_atomic::Ordering::Relaxed); + counter.load(portable_atomic::Ordering::Release) == 0 + } +} + +/// A marker struct representing the behavior specialization for operations within +/// [`ContiguousMemory`](crate::ContiguousMemory) that do not require thread-safety. +/// This implementation skips mutexes, making it faster but unsuitable for concurrent usage. 
+pub struct NotThreadSafeImpl; +impl ImplDetails for NotThreadSafeImpl { + type Base = Rc>; + type AllocationTracker = Rc>; + type AllocResult = Ref; + type UseCounter = Rc>; + + #[inline(always)] + fn new_base(value: *mut u8) -> Self::Base { + Rc::new(Cell::new(value)) + } + + #[inline(always)] + fn get_base(base: &Self::Base) -> Result<*mut u8, ContiguousMemoryError> { + Ok(base.get()) + } + + #[inline(always)] + fn reallocate( + base: &mut Self::Base, + layout: &mut Layout, + new_capacity: usize, + ) -> Result<*mut u8, ContiguousMemoryError> { + let value = unsafe { reallocate(base.get(), *layout, new_capacity) }; + base.set(value); + *layout = Layout::from_size_align(new_capacity, layout.align())?; + Ok(value) + } + + #[inline(always)] + fn deallocate(base: &Self::Base, layout: Layout) { + unsafe { deallocate(base.get(), layout) }; + base.set(null_mut()) + } + + #[inline(always)] + fn new_allocation_tacker(capacity: usize) -> Self::AllocationTracker { + Rc::new(RefCell::new(AllocationTracker::new(capacity))) + } + + #[inline(always)] + fn resize_tracker( + tracker: &mut Self::AllocationTracker, + new_capacity: usize, + ) -> Result<(), ContiguousMemoryError> { + tracker.borrow_mut().resize(new_capacity) + } + + #[inline(always)] + fn next_free( + tracker: &mut Self::AllocationTracker, + layout: Layout, + ) -> Result { + tracker + .try_borrow_mut() + .map_err(|_| ContiguousMemoryError::TrackerInUse)? + .take_next(layout) + } + + #[inline(always)] + fn release_range( + tracker: &mut Self::AllocationTracker, + range: ByteRange, + ) -> Result<(), ContiguousMemoryError> { + tracker + .try_borrow_mut() + .map_err(|_| ContiguousMemoryError::TrackerInUse)? 
+ .release(range) + } + + #[inline(always)] + fn build_ref( + base: &Self::Base, + _addr: *mut T, + range: &ByteRange, + tracker: &Self::AllocationTracker, + ) -> Self::AllocResult { + Ref { + base: base.clone(), + range: range.clone(), + tracker: tracker.clone(), + uses: Rc::new(Cell::new(1)), + _phantom: PhantomData, + } + } + + #[inline(always)] + fn bump_ref(counter: &Self::UseCounter) { + counter.set(counter.get() + 1); + } + + #[inline(always)] + fn drop_ref(counter: &mut Self::UseCounter) -> bool { + counter.set(counter.get() - 1); + counter.get() == 0 + } +} + +/// A marker struct representing the behavior specialization for a highly performance-optimized, +/// yet unsafe implementation within [`ContiguousMemory`](crate::ContiguousMemory). This trait is used when +/// the exact required size is known during construction, and when the container is guaranteed +/// to outlive any pointers to data contained in the memory block. +pub struct FixedSizeImpl; +impl ImplDetails for FixedSizeImpl { + type Base = *mut u8; + type AllocationTracker = AllocationTracker; + type AllocResult = *mut T; + type UseCounter = (); + + const CAN_GROW: bool = false; + + #[inline(always)] + fn new_base(value: *mut u8) -> Self::Base { + value + } + + #[inline(always)] + fn get_base(base: &Self::Base) -> Result<*mut u8, ContiguousMemoryError> { + Ok(*base) + } + + #[inline(always)] + fn reallocate( + _base: &mut Self::Base, + _layout: &mut Layout, + _new_capacity: usize, + ) -> Result<*mut u8, ContiguousMemoryError> { + unimplemented!("can't reallocate ContiguousMemory with FixedSizeImpl"); + } + + #[inline(always)] + fn deallocate(base: &Self::Base, layout: Layout) { + unsafe { + deallocate(*base, layout); + } + } + + #[inline(always)] + fn new_allocation_tacker(capacity: usize) -> Self::AllocationTracker { + AllocationTracker::new(capacity) + } + + #[inline(always)] + fn resize_tracker( + _tracker: &mut Self::AllocationTracker, + _new_capacity: usize, + ) -> Result<(), 
ContiguousMemoryError> { + Err(ContiguousMemoryError::NoStorageLeft) + } + + #[inline(always)] + fn next_free( + tracker: &mut Self::AllocationTracker, + layout: Layout, + ) -> Result { + tracker.take_next(layout) + } + + #[inline(always)] + fn release_range( + tracker: &mut Self::AllocationTracker, + range: ByteRange, + ) -> Result<(), ContiguousMemoryError> { + tracker.release(range) + } + + #[inline(always)] + fn build_ref( + _base: &Self::Base, + addr: *mut T, + _range: &ByteRange, + _tracker: &Self::AllocationTracker, + ) -> Self::AllocResult { + addr + } + + #[inline(always)] + fn bump_ref(_counter: &Self::UseCounter) { + unimplemented!("CMRef not implemented for FixedSizeImpl ContiguousMemory") + } + + #[inline(always)] + fn drop_ref(_counter: &mut Self::UseCounter) -> bool { + unimplemented!("CMRef not implemented for FixedSizeImpl ContiguousMemory") + } +} + +/// A memory container for efficient allocation and storage of contiguous data. +/// +/// This structure manages a contiguous block of memory, allowing for the storage of arbitrary data +/// while ensuring that stored items are placed adjacently without imposing any restrictions on layout, +/// such as those found in memory pools or the standard library's [Vec]. +/// +/// The `ContiguousMemory` type is particularly useful for scenarios where data locality and efficient +/// memory usage are crucial, as it provides a means to allocate and manage memory in a linear fashion. +/// +/// # Performance +/// +/// The [`store`] operation has a generally constant time complexity when storing items with the same layout, +/// as it primarily involves finding available memory regions. The time complexity increases linearly with the +/// number of gaps between previously stored items, making it an effective choice for maintaining data locality. 
+/// +/// [`store`]: ContiguousMemory::store +#[cfg_attr(feature = "debug", derive(Debug))] +pub struct ContiguousMemory { + base: S::Base, + layout: Layout, + tracker: S::AllocationTracker, +} + +impl ContiguousMemory { + /// Creates a new `ContiguousMemory` instance with the specified capacity and alignment. + /// + /// # Arguments + /// + /// * `capacity` - The initial capacity of the memory container. + /// * `alignment` - The alignment requirement for memory allocations. + /// + /// # Returns + /// + /// A `Result` containing the newly created `ContiguousMemory` instance on success, + /// or a `LayoutError` if the memory layout cannot be satisfied. + pub fn new(capacity: usize, alignment: usize) -> Result { + let layout = Layout::from_size_align(capacity, alignment)?; + let base = unsafe { allocate(layout) }; + Ok(ContiguousMemory { + base: S::new_base(base), + layout, + tracker: S::new_allocation_tacker(capacity), + }) + } + + /// Retrieves the base address of the allocated memory. + /// + /// # Safety + /// + /// This function is marked as unsafe because it returns a raw pointer to the allocated memory. + /// + /// # Returns + /// + /// A `Result` containing the base address of the allocated memory on success, + /// or [`ContiguousMemoryError::Poisoned`] error when the Mutex holding the base address is poisoned. + pub unsafe fn get_base(&self) -> Result<*mut u8, ContiguousMemoryError> { + S::get_base(&self.base) + } + + /// Returns the current capacity of the memory container. + /// + /// The capacity represents the size of the memory block that has been allocated + /// for storing data. It may be larger than the amount of data currently stored + /// within the container. + pub fn get_capacity(&self) -> usize { + self.layout.size() + } + + /// Resizes the memory container to the specified capacity. + /// + /// This function can either grow or shrink the container based on the new capacity. 
+ /// + /// # Arguments + /// + /// * `new_capacity` - The desired new capacity of the memory container. + /// + /// # Returns + /// + /// A `Result` indicating success on resizing the container, or a `ContiguousMemoryError` if an error occurs. + /// + /// # Errors + /// + /// This function can return the following errors: + /// + /// - [`ContiguousMemoryError::Poisoned`]: This error can occur if the mutex holding the base address or + /// the [`AllocationTracker`](crate::AllocationTracker) is poisoned. This error suggests potential thread contention issues. + /// + /// - [`ContiguousMemoryError::Unshrinkable`]: This error occurs when attempting to shrink the memory container, but + /// the stored data prevents the container from being shrunk to the desired capacity. + pub fn resize(&mut self, new_capacity: usize) -> Result<(), ContiguousMemoryError> { + if new_capacity == self.layout.size() { + return Ok(()); + } + + let old_capacity = self.layout.size(); + S::resize_tracker(&mut self.tracker, new_capacity)?; + match S::reallocate(&mut self.base, &mut self.layout, new_capacity) { + Ok(_) => {} + Err(ContiguousMemoryError::Poisoned { which }) if S::USE_LOCKS => { + S::resize_tracker(&mut self.tracker, old_capacity)?; + return Err(ContiguousMemoryError::Poisoned { which }); + } + Err(other) => return Err(other), + } + + Ok(()) + } + + /// Stores a value of type `T` in the memory container. + /// + /// This operation allocates memory for the provided value and stores it in the contiguous memory block. + /// + /// # Arguments + /// + /// * `value` - The value of type `T` to be stored in the memory container. + /// + /// # Returns + /// + /// A `Result` that encapsulates the result of the storage operation: + /// + /// - If the implementation details type `S` is [`NotThreadSafeImpl`](crate::NotThreadSafeImpl) + /// or [`ThreadSafeImpl`](crate::ThreadSafeImpl), the result will be a [`crate::Ref`] pointing to the stored value. 
+ /// This reference provides a convenient and safe way to access and manipulate the stored data within the memory block. + /// + /// - If the implementation details type `S` is [`FixedSizeImpl`](crate::FixedSizeImpl), the result will be a raw pointer + /// (`*mut T`) to the stored value. This is due to the fact that fixed-size container won't move which means + /// the pointer will not be invalidated. + /// + /// The returned [`Result`] indicates success or an error if the storage operation encounters any issues. + /// + /// # Errors + /// + /// This function can return the following errors: + /// + /// - [`ContiguousMemoryError::NoStorageLeft`]: Only returned when the implementation details type `S` + /// is [`FixedSizeImpl`](crate::FixedSizeImpl) and indicates that the container couldn't accommodate the + /// provided data due to size limitations. Other implementation details grow the container instead. + /// + /// - [`ContiguousMemoryError::Poisoned`]: This error can occur when the [`AllocationTracker`](crate::AllocationTracker) + /// associated with the memory container is poisoned. + /// + pub fn store(&mut self, value: T) -> Result, ContiguousMemoryError> { + let layout = Layout::new::(); + + let (addr, range) = loop { + match S::next_free(&mut self.tracker, layout) { + Ok(taken) => { + let found = (taken.0 + S::get_base(&self.base)? 
as usize) as *mut T; + unsafe { + write_unaligned(found, value); + } + break (found, taken); + } + Err(ContiguousMemoryError::NoStorageLeft) if S::CAN_GROW => { + self.resize(self.layout.size() * 2)?; + } + Err(other) => return Err(other), + } + }; + + Ok(S::build_ref(&self.base, addr, &range, &self.tracker)) + } +} + +impl ContiguousMemory { + #[inline(always)] + pub unsafe fn free_typed(&mut self, value: *mut T) -> Result<(), ContiguousMemoryError> { + Self::free(self, value, size_of::()) + } + + pub unsafe fn free( + &mut self, + value: *mut T, + size: usize, + ) -> Result<(), ContiguousMemoryError> { + FixedSizeImpl::release_range( + &mut self.tracker, + ByteRange(value as usize, value as usize + size), + ) + } +} + +/// A type alias for [`ContiguousMemory`](crate::ContiguousMemory) that enables references to data stored within it +/// to be used safely in asynchronous contexts. This version uses a thread-safe implementation. +pub type SyncContiguousMemory = ContiguousMemory; + +/// A type alias for [`ContiguousMemory`](crate::ContiguousMemory) that offers a synchronous implementation without +/// using internal mutexes. This version is faster but doesn't provide thread safety. It allows the container to be +/// shrunk or grown to fit more data. +pub type GrowableContiguousMemory = ContiguousMemory; + +/// A type alias for [`ContiguousMemory`](crate::ContiguousMemory) that provides a highly performance-optimized +/// (unsafe) implementation. It's suitable when the exact required size is known during construction and when +/// the container is guaranteed to outlive any references pointing to it. +pub type FixedContiguousMemory = ContiguousMemory; + +impl Drop for ContiguousMemory { + fn drop(&mut self) { + S::deallocate(&self.base, self.layout) + } +} + +/// A reference to `T` data stored in a [`ContiguousMemory`] structure. 
+pub struct Ref { + base: S::Base, + range: ByteRange, + tracker: S::AllocationTracker, + uses: S::UseCounter, + _phantom: PhantomData, +} + +impl Ref { + /// Tries accessing referenced data at its current location. + /// + /// Returns a [`Poisoned`](ContiguousMemoryError::Poisoned) error if the Mutex + /// holding the `base` address pointer has been poisoned. + pub fn get(&self) -> Result<&T, ContiguousMemoryError> { + unsafe { + let base = S::get_base(&self.base)?.offset(self.range.0 as isize); + Ok(&*(base as *mut T)) + } + } +} + +impl Clone for Ref { + fn clone(&self) -> Self { + S::bump_ref(&self.uses); + Ref { + base: self.base.clone(), + range: self.range.clone(), + tracker: self.tracker.clone(), + uses: self.uses.clone(), + _phantom: PhantomData, + } + } +} + +impl Drop for Ref { + fn drop(&mut self) { + if S::drop_ref(&mut self.uses) { + let _ = S::release_range(&mut self.tracker, self.range); + } + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn byterange_merging_works() { + let a = ByteRange::new_unchecked(0, 10); + let b = ByteRange::new_unchecked(10, 20); + + let added_seq = a.merge_unchecked(b); + assert_eq!(added_seq.0, 0); + assert_eq!(added_seq.1, 20); + + let added_seq_rev = b.merge_unchecked(a); + assert_eq!(added_seq_rev.0, 0); + assert_eq!(added_seq_rev.1, 20); + } + + #[test] + fn byterange_difference_works() { + let larger = ByteRange::new_unchecked(0, 500); + + let left_aligned = ByteRange::new_unchecked(0, 10); + let test_left = larger.difference_unchecked(left_aligned); + assert_eq!(test_left.0 .0, 0); + assert_eq!(test_left.0 .1, 0); + assert_eq!(test_left.1 .0, 10); + assert_eq!(test_left.1 .1, 500); + + let contained = ByteRange::new_unchecked(300, 400); + let test_contained = larger.difference_unchecked(contained); + assert_eq!(test_contained.0 .0, 0); + assert_eq!(test_contained.0 .1, 300); + assert_eq!(test_contained.1 .0, 400); + assert_eq!(test_contained.1 .1, 500); + + let right_aligned = 
ByteRange::new_unchecked(450, 500); + let test_right = larger.difference_unchecked(right_aligned); + assert_eq!(test_right.0 .0, 0); + assert_eq!(test_right.0 .1, 450); + assert_eq!(test_right.1 .0, 500); + assert_eq!(test_right.1 .1, 500); + } + + #[test] + fn test_new_allocation_tracker() { + let tracker = AllocationTracker::new(1024); + assert_eq!(tracker.len(), 1024); + assert_eq!(tracker.is_empty(), false); + assert_eq!(tracker.whole_range(), ByteRange(0, 1024)); + } + + #[test] + fn test_resize_allocation_tracker() { + let mut tracker = AllocationTracker::new(1024); + + tracker.resize(512).unwrap(); + assert_eq!(tracker.len(), 512); + + tracker.resize(2048).unwrap(); + assert_eq!(tracker.len(), 2048); + } + + #[test] + fn test_take_and_release_allocation_tracker() { + let mut tracker = AllocationTracker::new(1024); + + let range = tracker + .take_next(Layout::from_size_align(32, 8).unwrap()) + .unwrap(); + assert_eq!(range, ByteRange(0, 32)); + + tracker + .release(range) + .expect("expected AllocationTracker to have the provided range marked as taken"); + assert_eq!(tracker.is_empty(), false); + } + + #[test] + fn test_peek_next_allocation_tracker() { + let tracker = AllocationTracker::new(1024); + + let layout = Layout::from_size_align(64, 8).unwrap(); + let range = tracker.peek_next(layout).unwrap(); + assert_eq!(range, ByteRange(0, 64)); + } + + #[test] + fn test_take_next_allocation_tracker() { + let mut tracker = AllocationTracker::new(1024); + + let layout = Layout::from_size_align(128, 8).unwrap(); + let range = tracker.take_next(layout).unwrap(); + assert_eq!(range, ByteRange(0, 128)); + } + + #[test] + fn test_new_contiguous_memory() { + let memory = ContiguousMemory::::new(1024, 8).unwrap(); + assert_eq!(memory.get_capacity(), 1024); + } + + #[test] + fn test_store_and_get_contiguous_memory() { + let mut memory = ContiguousMemory::::new(1024, 8).unwrap(); + + let value = 42u32; + let stored_ref = memory.store(value).unwrap(); + let retrieved_value 
= stored_ref.get().unwrap(); + assert_eq!(*retrieved_value, value); + } + + #[test] + fn test_resize_contiguous_memory() { + let mut memory = ContiguousMemory::::new(1024, 8).unwrap(); + + memory.resize(512).unwrap(); + assert_eq!(memory.get_capacity(), 512); + + memory.resize(2048).unwrap(); + assert_eq!(memory.get_capacity(), 2048); + } + + #[test] + fn test_growable_contiguous_memory() { + let mut memory = GrowableContiguousMemory::new(1024, 8).unwrap(); + + let value = 42u32; + let stored_ref = memory.store(value).unwrap(); + let retrieved_value = stored_ref.get().unwrap(); + assert_eq!(*retrieved_value, value); + + memory.resize(2048).unwrap(); + assert_eq!(memory.get_capacity(), 2048); + } + + #[test] + fn test_fixed_contiguous_memory() { + let mut memory = FixedContiguousMemory::new(1024, 8).unwrap(); + + let value = 42u32; + let stored_ref = memory.store(value).unwrap(); + unsafe { + assert_eq!(*stored_ref, value); + } + + // No resize allowed for FixedContiguousMemory + assert!(memory.resize(2048).is_err()); + } + + struct TestStruct1 { + field1: u32, + field2: u64, + } + + struct TestStruct2 { + field1: u16, + field2: f32, + field3: i32, + } + + #[test] + fn test_store_structs_with_different_layouts() { + let mut memory = ContiguousMemory::::new(1024, 8).unwrap(); + + let struct1 = TestStruct1 { + field1: 42, + field2: 1234567890, + }; + let struct2 = TestStruct2 { + field1: 123, + field2: 3.14, + field3: -42, + }; + + let stored_struct1 = memory.store(struct1).unwrap(); + let stored_struct2 = memory.store(struct2).unwrap(); + + let retrieved_struct1 = stored_struct1.get().unwrap(); + assert_eq!(retrieved_struct1.field1, 42); + assert_eq!(retrieved_struct1.field2, 1234567890); + + let retrieved_struct2 = stored_struct2.get().unwrap(); + assert_eq!(retrieved_struct2.field1, 123); + assert_eq!(retrieved_struct2.field2, 3.14); + assert_eq!(retrieved_struct2.field3, -42); + } +}