From 2b9096517c0f69bce6308ea81ff50c9caa8b2fe8 Mon Sep 17 00:00:00 2001 From: flysand7 Date: Sat, 7 Sep 2024 09:20:56 +1100 Subject: [PATCH 01/35] [mem]: Code formatting --- core/mem/alloc.odin | 220 +++++++++++++++++++++---- core/mem/allocators.odin | 175 +++++++++++--------- core/mem/mem.odin | 16 +- core/mem/mutex_allocator.odin | 14 +- core/mem/raw.odin | 36 ++-- core/mem/rollback_stack_allocator.odin | 81 ++++----- core/mem/tracking_allocator.odin | 34 ++-- 7 files changed, 388 insertions(+), 188 deletions(-) diff --git a/core/mem/alloc.odin b/core/mem/alloc.odin index e51d971e1db..558e810e34a 100644 --- a/core/mem/alloc.odin +++ b/core/mem/alloc.odin @@ -63,30 +63,58 @@ DEFAULT_PAGE_SIZE :: 4 * 1024 @(require_results) -alloc :: proc(size: int, alignment: int = DEFAULT_ALIGNMENT, allocator := context.allocator, loc := #caller_location) -> (rawptr, Allocator_Error) { +alloc :: proc( + size: int, + alignment: int = DEFAULT_ALIGNMENT, + allocator := context.allocator, + loc := #caller_location, +) -> (rawptr, Allocator_Error) { data, err := runtime.mem_alloc(size, alignment, allocator, loc) return raw_data(data), err } @(require_results) -alloc_bytes :: proc(size: int, alignment: int = DEFAULT_ALIGNMENT, allocator := context.allocator, loc := #caller_location) -> ([]byte, Allocator_Error) { +alloc_bytes :: proc( + size: int, + alignment: int = DEFAULT_ALIGNMENT, + allocator := context.allocator, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { return runtime.mem_alloc(size, alignment, allocator, loc) } @(require_results) -alloc_bytes_non_zeroed :: proc(size: int, alignment: int = DEFAULT_ALIGNMENT, allocator := context.allocator, loc := #caller_location) -> ([]byte, Allocator_Error) { +alloc_bytes_non_zeroed :: proc( + size: int, + alignment: int = DEFAULT_ALIGNMENT, + allocator := context.allocator, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { return runtime.mem_alloc_non_zeroed(size, alignment, allocator, loc) } -free :: proc(ptr: 
rawptr, allocator := context.allocator, loc := #caller_location) -> Allocator_Error { +free :: proc( + ptr: rawptr, + allocator := context.allocator, + loc := #caller_location, +) -> Allocator_Error { return runtime.mem_free(ptr, allocator, loc) } -free_with_size :: proc(ptr: rawptr, byte_count: int, allocator := context.allocator, loc := #caller_location) -> Allocator_Error { +free_with_size :: proc( + ptr: rawptr, + byte_count: int, + allocator := context.allocator, + loc := #caller_location, +) -> Allocator_Error { return runtime.mem_free_with_size(ptr, byte_count, allocator, loc) } -free_bytes :: proc(bytes: []byte, allocator := context.allocator, loc := #caller_location) -> Allocator_Error { +free_bytes :: proc( + bytes: []byte, + allocator := context.allocator, + loc := #caller_location, +) -> Allocator_Error { return runtime.mem_free_bytes(bytes, allocator, loc) } @@ -95,13 +123,26 @@ free_all :: proc(allocator := context.allocator, loc := #caller_location) -> All } @(require_results) -resize :: proc(ptr: rawptr, old_size, new_size: int, alignment: int = DEFAULT_ALIGNMENT, allocator := context.allocator, loc := #caller_location) -> (rawptr, Allocator_Error) { +resize :: proc( + ptr: rawptr, + old_size: int, + new_size: int, + alignment: int = DEFAULT_ALIGNMENT, + allocator := context.allocator, + loc := #caller_location, +) -> (rawptr, Allocator_Error) { data, err := runtime.mem_resize(ptr, old_size, new_size, alignment, allocator, loc) return raw_data(data), err } @(require_results) -resize_bytes :: proc(old_data: []byte, new_size: int, alignment: int = DEFAULT_ALIGNMENT, allocator := context.allocator, loc := #caller_location) -> ([]byte, Allocator_Error) { +resize_bytes :: proc( + old_data: []byte, + new_size: int, + alignment: int = DEFAULT_ALIGNMENT, + allocator := context.allocator, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { return runtime.mem_resize(raw_data(old_data), len(old_data), new_size, alignment, allocator, loc) } @@ -115,7 
+156,11 @@ query_features :: proc(allocator: Allocator, loc := #caller_location) -> (set: A } @(require_results) -query_info :: proc(pointer: rawptr, allocator: Allocator, loc := #caller_location) -> (props: Allocator_Query_Info) { +query_info :: proc( + pointer: rawptr, + allocator: Allocator, + loc := #caller_location, +) -> (props: Allocator_Query_Info) { props.pointer = pointer if allocator.procedure != nil { allocator.procedure(allocator.data, .Query_Info, 0, 0, &props, 0, loc) @@ -123,25 +168,44 @@ query_info :: proc(pointer: rawptr, allocator: Allocator, loc := #caller_locatio return } - - -delete_string :: proc(str: string, allocator := context.allocator, loc := #caller_location) -> Allocator_Error { +delete_string :: proc( + str: string, + allocator := context.allocator, + loc := #caller_location, +) -> Allocator_Error { return runtime.delete_string(str, allocator, loc) } -delete_cstring :: proc(str: cstring, allocator := context.allocator, loc := #caller_location) -> Allocator_Error { + +delete_cstring :: proc( + str: cstring, + allocator := context.allocator, + loc := #caller_location, +) -> Allocator_Error { return runtime.delete_cstring(str, allocator, loc) } -delete_dynamic_array :: proc(array: $T/[dynamic]$E, loc := #caller_location) -> Allocator_Error { + +delete_dynamic_array :: proc( + array: $T/[dynamic]$E, + loc := #caller_location, +) -> Allocator_Error { return runtime.delete_dynamic_array(array, loc) } -delete_slice :: proc(array: $T/[]$E, allocator := context.allocator, loc := #caller_location) -> Allocator_Error { + +delete_slice :: proc( + array: $T/[]$E, + allocator := context.allocator, + loc := #caller_location, +) -> Allocator_Error { return runtime.delete_slice(array, allocator, loc) } -delete_map :: proc(m: $T/map[$K]$V, loc := #caller_location) -> Allocator_Error { + +delete_map :: proc( + m: $T/map[$K]$V, + loc := #caller_location, +) -> Allocator_Error { return runtime.delete_map(m, loc) } - delete :: proc{ delete_string, 
delete_cstring, @@ -150,46 +214,102 @@ delete :: proc{ delete_map, } - @(require_results) -new :: proc($T: typeid, allocator := context.allocator, loc := #caller_location) -> (^T, Allocator_Error) { +new :: proc( + $T: typeid, + allocator := context.allocator, + loc := #caller_location, +) -> (^T, Allocator_Error) { return new_aligned(T, align_of(T), allocator, loc) } + @(require_results) -new_aligned :: proc($T: typeid, alignment: int, allocator := context.allocator, loc := #caller_location) -> (t: ^T, err: Allocator_Error) { +new_aligned :: proc( + $T: typeid, + alignment: int, + allocator := context.allocator, + loc := #caller_location, +) -> (t: ^T, err: Allocator_Error) { return runtime.new_aligned(T, alignment, allocator, loc) } + @(require_results) -new_clone :: proc(data: $T, allocator := context.allocator, loc := #caller_location) -> (t: ^T, err: Allocator_Error) { +new_clone :: proc( + data: $T, + allocator := context.allocator, + loc := #caller_location, +) -> (t: ^T, err: Allocator_Error) { return runtime.new_clone(data, allocator, loc) } @(require_results) -make_aligned :: proc($T: typeid/[]$E, #any_int len: int, alignment: int, allocator := context.allocator, loc := #caller_location) -> (slice: T, err: Allocator_Error) { +make_aligned :: proc( + $T: typeid/[]$E, + #any_int len: int, + alignment: int, + allocator := context.allocator, + loc := #caller_location, +) -> (slice: T, err: Allocator_Error) { return runtime.make_aligned(T, len, alignment, allocator, loc) } + @(require_results) -make_slice :: proc($T: typeid/[]$E, #any_int len: int, allocator := context.allocator, loc := #caller_location) -> (T, Allocator_Error) { +make_slice :: proc( + $T: typeid/[]$E, + #any_int len: int, + allocator := context.allocator, + loc := #caller_location, +) -> (T, Allocator_Error) { return runtime.make_slice(T, len, allocator, loc) } + @(require_results) -make_dynamic_array :: proc($T: typeid/[dynamic]$E, allocator := context.allocator, loc := #caller_location) -> 
(T, Allocator_Error) { +make_dynamic_array :: proc( + $T: typeid/[dynamic]$E, + allocator := context.allocator, + loc := #caller_location, +) -> (T, Allocator_Error) { return runtime.make_dynamic_array(T, allocator, loc) } + @(require_results) -make_dynamic_array_len :: proc($T: typeid/[dynamic]$E, #any_int len: int, allocator := context.allocator, loc := #caller_location) -> (T, Allocator_Error) { +make_dynamic_array_len :: proc( + $T: typeid/[dynamic]$E, + #any_int len: int, + allocator := context.allocator, + loc := #caller_location, +) -> (T, Allocator_Error) { return runtime.make_dynamic_array_len_cap(T, len, len, allocator, loc) } + @(require_results) -make_dynamic_array_len_cap :: proc($T: typeid/[dynamic]$E, #any_int len: int, #any_int cap: int, allocator := context.allocator, loc := #caller_location) -> (array: T, err: Allocator_Error) { +make_dynamic_array_len_cap :: proc( + $T: typeid/[dynamic]$E, + #any_int len: int, + #any_int cap: int, + allocator := context.allocator, + loc := #caller_location, +) -> (array: T, err: Allocator_Error) { return runtime.make_dynamic_array_len_cap(T, len, cap, allocator, loc) } + @(require_results) -make_map :: proc($T: typeid/map[$K]$E, #any_int cap: int = 1< (m: T, err: Allocator_Error) { +make_map :: proc( + $T: typeid/map[$K]$E, + #any_int cap: int = 1< (m: T, err: Allocator_Error) { return runtime.make_map(T, cap, allocator, loc) } + @(require_results) -make_multi_pointer :: proc($T: typeid/[^]$E, #any_int len: int, allocator := context.allocator, loc := #caller_location) -> (mp: T, err: Allocator_Error) { +make_multi_pointer :: proc( + $T: typeid/[^]$E, + #any_int len: int, + allocator := context.allocator, + loc := #caller_location +) -> (mp: T, err: Allocator_Error) { return runtime.make_multi_pointer(T, len, allocator, loc) } @@ -202,26 +322,58 @@ make :: proc{ make_multi_pointer, } - @(require_results) -default_resize_align :: proc(old_memory: rawptr, old_size, new_size, alignment: int, allocator := 
context.allocator, loc := #caller_location) -> (res: rawptr, err: Allocator_Error) { +default_resize_align :: proc( + old_memory: rawptr, + old_size: int, + new_size: int, + alignment: int, + allocator := context.allocator, + loc := #caller_location, +) -> (res: rawptr, err: Allocator_Error) { data: []byte - data, err = default_resize_bytes_align(([^]byte)(old_memory)[:old_size], new_size, alignment, allocator, loc) + data, err = default_resize_bytes_align( + ([^]byte) (old_memory)[:old_size], + new_size, + alignment, + allocator, + loc, + ) res = raw_data(data) return } @(require_results) -default_resize_bytes_align_non_zeroed :: proc(old_data: []byte, new_size, alignment: int, allocator := context.allocator, loc := #caller_location) -> ([]byte, Allocator_Error) { +default_resize_bytes_align_non_zeroed :: proc( + old_data: []byte, + new_size: int, + alignment: int, + allocator := context.allocator, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { return _default_resize_bytes_align(old_data, new_size, alignment, false, allocator, loc) } + @(require_results) -default_resize_bytes_align :: proc(old_data: []byte, new_size, alignment: int, allocator := context.allocator, loc := #caller_location) -> ([]byte, Allocator_Error) { +default_resize_bytes_align :: proc( + old_data: []byte, + new_size: int, + alignment: int, + allocator := context.allocator, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { return _default_resize_bytes_align(old_data, new_size, alignment, true, allocator, loc) } @(require_results) -_default_resize_bytes_align :: #force_inline proc(old_data: []byte, new_size, alignment: int, should_zero: bool, allocator := context.allocator, loc := #caller_location) -> ([]byte, Allocator_Error) { +_default_resize_bytes_align :: #force_inline proc( + old_data: []byte, + new_size: int, + alignment: int, + should_zero: bool, + allocator := context.allocator, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { old_memory := 
raw_data(old_data) old_size := len(old_data) if old_memory == nil { diff --git a/core/mem/allocators.odin b/core/mem/allocators.odin index a5b93ad056e..7bc1a6d77d2 100644 --- a/core/mem/allocators.odin +++ b/core/mem/allocators.odin @@ -3,9 +3,14 @@ package mem import "base:intrinsics" import "base:runtime" -nil_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode, - size, alignment: int, - old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) { +nil_allocator_proc :: proc( + allocator_data: rawptr, + mode: Allocator_Mode, + size, alignment: int, + old_memory: rawptr, + old_size: int, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { return nil, nil } @@ -16,8 +21,6 @@ nil_allocator :: proc() -> Allocator { } } -// Custom allocators - Arena :: struct { data: []byte, offset: int, @@ -30,7 +33,6 @@ Arena_Temp_Memory :: struct { prev_offset: int, } - arena_init :: proc(a: ^Arena, data: []byte) { a.data = data a.offset = 0 @@ -54,9 +56,15 @@ arena_allocator :: proc(arena: ^Arena) -> Allocator { } } -arena_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode, - size, alignment: int, - old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) { +arena_allocator_proc :: proc( + allocator_data: rawptr, + mode: Allocator_Mode, + size: int, + alignment: int, + old_memory: rawptr, + old_size: int, + location := #caller_location, +) -> ([]byte, Allocator_Error) { arena := cast(^Arena)allocator_data switch mode { @@ -120,8 +128,6 @@ end_arena_temp_memory :: proc(tmp: Arena_Temp_Memory) { tmp.arena.temp_count -= 1 } - - Scratch_Allocator :: struct { data: []byte, curr_offset: int, @@ -151,9 +157,14 @@ scratch_allocator_destroy :: proc(s: ^Scratch_Allocator) { s^ = {} } -scratch_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode, - size, alignment: int, - old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) { 
+scratch_allocator_proc :: proc( + allocator_data: rawptr, + mode: Allocator_Mode, + size, alignment: int, + old_memory: rawptr, + old_size: int, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { s := (^Scratch_Allocator)(allocator_data) @@ -299,10 +310,6 @@ scratch_allocator :: proc(allocator: ^Scratch_Allocator) -> Allocator { } } - - - - Stack_Allocation_Header :: struct { prev_offset: int, padding: int, @@ -339,34 +346,44 @@ stack_allocator :: proc(stack: ^Stack) -> Allocator { } } - -stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode, - size, alignment: int, - old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) { +stack_allocator_proc :: proc( + allocator_data: rawptr, + mode: Allocator_Mode, + size: int, + alignment: int, + old_memory: rawptr, + old_size: int, + location := #caller_location, +) -> ([]byte, Allocator_Error) { s := cast(^Stack)allocator_data if s.data == nil { return nil, .Invalid_Argument } - raw_alloc :: proc(s: ^Stack, size, alignment: int, zero_memory: bool) -> ([]byte, Allocator_Error) { + raw_alloc :: proc( + s: ^Stack, + size: int, + alignment: int, + zero_memory: bool, + ) -> ([]byte, Allocator_Error) { curr_addr := uintptr(raw_data(s.data)) + uintptr(s.curr_offset) - padding := calc_padding_with_header(curr_addr, uintptr(alignment), size_of(Stack_Allocation_Header)) + padding := calc_padding_with_header( + curr_addr, + uintptr(alignment), + size_of(Stack_Allocation_Header), + ) if s.curr_offset + padding + size > len(s.data) { return nil, .Out_Of_Memory } s.prev_offset = s.curr_offset s.curr_offset += padding - next_addr := curr_addr + uintptr(padding) header := (^Stack_Allocation_Header)(next_addr - size_of(Stack_Allocation_Header)) header.padding = padding header.prev_offset = s.prev_offset - s.curr_offset += size - s.peak_used = max(s.peak_used, s.curr_offset) - if zero_memory { zero(rawptr(next_addr), size) } @@ -467,12 +484,6 @@ stack_allocator_proc :: 
proc(allocator_data: rawptr, mode: Allocator_Mode, return nil, nil } - - - - - - Small_Stack_Allocation_Header :: struct { padding: u8, } @@ -505,9 +516,14 @@ small_stack_allocator :: proc(stack: ^Small_Stack) -> Allocator { } } -small_stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode, - size, alignment: int, - old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) { +small_stack_allocator_proc :: proc( + allocator_data: rawptr, + mode: Allocator_Mode, + size, alignment: int, + old_memory: rawptr, + old_size: int, + location := #caller_location, +) -> ([]byte, Allocator_Error) { s := cast(^Small_Stack)allocator_data if s.data == nil { @@ -612,10 +628,6 @@ small_stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode, return nil, nil } - - - - Dynamic_Pool :: struct { block_size: int, out_band_size: int, @@ -632,15 +644,18 @@ Dynamic_Pool :: struct { block_allocator: Allocator, } - DYNAMIC_POOL_BLOCK_SIZE_DEFAULT :: 65536 DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT :: 6554 - - -dynamic_pool_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode, - size, alignment: int, - old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) { +dynamic_pool_allocator_proc :: proc( + allocator_data: rawptr, + mode: Allocator_Mode, + size: int, + alignment: int, + old_memory: rawptr, + old_size: int, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { pool := (^Dynamic_Pool)(allocator_data) switch mode { @@ -689,19 +704,21 @@ dynamic_pool_allocator :: proc(pool: ^Dynamic_Pool) -> Allocator { } } -dynamic_pool_init :: proc(pool: ^Dynamic_Pool, - block_allocator := context.allocator, - array_allocator := context.allocator, - block_size := DYNAMIC_POOL_BLOCK_SIZE_DEFAULT, - out_band_size := DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT, - alignment := 8) { - pool.block_size = block_size - pool.out_band_size = out_band_size - pool.alignment = alignment 
+dynamic_pool_init :: proc( + pool: ^Dynamic_Pool, + block_allocator := context.allocator, + array_allocator := context.allocator, + block_size := DYNAMIC_POOL_BLOCK_SIZE_DEFAULT, + out_band_size := DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT, + alignment := 8, +) { + pool.block_size = block_size + pool.out_band_size = out_band_size + pool.alignment = alignment pool.block_allocator = block_allocator pool.out_band_allocations.allocator = array_allocator - pool. unused_blocks.allocator = array_allocator - pool. used_blocks.allocator = array_allocator + pool.unused_blocks.allocator = array_allocator + pool.used_blocks.allocator = array_allocator } dynamic_pool_destroy :: proc(pool: ^Dynamic_Pool) { @@ -709,11 +726,9 @@ dynamic_pool_destroy :: proc(pool: ^Dynamic_Pool) { delete(pool.unused_blocks) delete(pool.used_blocks) delete(pool.out_band_allocations) - zero(pool, size_of(pool^)) } - @(require_results) dynamic_pool_alloc :: proc(pool: ^Dynamic_Pool, bytes: int) -> (rawptr, Allocator_Error) { data, err := dynamic_pool_alloc_bytes(pool, bytes) @@ -736,9 +751,14 @@ dynamic_pool_alloc_bytes :: proc(p: ^Dynamic_Pool, bytes: int) -> ([]byte, Alloc new_block = pop(&p.unused_blocks) } else { data: []byte - data, err = p.block_allocator.procedure(p.block_allocator.data, Allocator_Mode.Alloc, - p.block_size, p.alignment, - nil, 0) + data, err = p.block_allocator.procedure( + p.block_allocator.data, + Allocator_Mode.Alloc, + p.block_size, + p.alignment, + nil, + 0, + ) new_block = raw_data(data) } @@ -808,10 +828,14 @@ dynamic_pool_free_all :: proc(p: ^Dynamic_Pool) { clear(&p.unused_blocks) } - -panic_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode, - size, alignment: int, - old_memory: rawptr, old_size: int,loc := #caller_location) -> ([]byte, Allocator_Error) { +panic_allocator_proc :: proc( + allocator_data: rawptr, + mode: Allocator_Mode, + size, alignment: int, + old_memory: rawptr, + old_size: int, + loc := #caller_location, +) -> ([]byte, 
Allocator_Error) { switch mode { case .Alloc: @@ -859,11 +883,6 @@ panic_allocator :: proc() -> Allocator { } } - - - - - Buddy_Block :: struct #align(align_of(uint)) { size: uint, is_free: bool, @@ -929,7 +948,6 @@ buddy_block_coalescence :: proc(head, tail: ^Buddy_Block) { } } - @(require_results) buddy_block_find_best :: proc(head, tail: ^Buddy_Block, size: uint) -> ^Buddy_Block { assert(size != 0) @@ -998,7 +1016,6 @@ buddy_block_find_best :: proc(head, tail: ^Buddy_Block, size: uint) -> ^Buddy_Bl return nil } - Buddy_Allocator :: struct { head: ^Buddy_Block, tail: ^Buddy_Block, @@ -1089,9 +1106,13 @@ buddy_allocator_free :: proc(b: ^Buddy_Allocator, ptr: rawptr) -> Allocator_Erro return nil } -buddy_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode, - size, alignment: int, - old_memory: rawptr, old_size: int,loc := #caller_location) -> ([]byte, Allocator_Error) { +buddy_allocator_proc :: proc( + allocator_data: rawptr, mode: Allocator_Mode, + size, alignment: int, + old_memory: rawptr, + old_size: int, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { b := (^Buddy_Allocator)(allocator_data) diff --git a/core/mem/mem.odin b/core/mem/mem.odin index d423cc1eb99..9e47c9602c3 100644 --- a/core/mem/mem.odin +++ b/core/mem/mem.odin @@ -14,10 +14,12 @@ Exabyte :: runtime.Exabyte set :: proc "contextless" (data: rawptr, value: byte, len: int) -> rawptr { return runtime.memset(data, i32(value), len) } + zero :: proc "contextless" (data: rawptr, len: int) -> rawptr { intrinsics.mem_zero(data, len) return data } + zero_explicit :: proc "contextless" (data: rawptr, len: int) -> rawptr { // This routine tries to avoid the compiler optimizing away the call, // so that it is always executed. 
It is intended to provided @@ -27,20 +29,22 @@ zero_explicit :: proc "contextless" (data: rawptr, len: int) -> rawptr { intrinsics.atomic_thread_fence(.Seq_Cst) // Prevent reordering return data } + zero_item :: proc "contextless" (item: $P/^$T) -> P { intrinsics.mem_zero(item, size_of(T)) return item } + zero_slice :: proc "contextless" (data: $T/[]$E) -> T { zero(raw_data(data), size_of(E)*len(data)) return data } - copy :: proc "contextless" (dst, src: rawptr, len: int) -> rawptr { intrinsics.mem_copy(dst, src, len) return dst } + copy_non_overlapping :: proc "contextless" (dst, src: rawptr, len: int) -> rawptr { intrinsics.mem_copy_non_overlapping(dst, src, len) return dst @@ -120,6 +124,7 @@ compare_ptrs :: proc "contextless" (a, b: rawptr, n: int) -> int { } ptr_offset :: intrinsics.ptr_offset + ptr_sub :: intrinsics.ptr_sub @(require_results) @@ -211,6 +216,7 @@ align_forward_uintptr :: proc(ptr, align: uintptr) -> uintptr { align_forward_int :: proc(ptr, align: int) -> int { return int(align_forward_uintptr(uintptr(ptr), uintptr(align))) } + @(require_results) align_forward_uint :: proc(ptr, align: uint) -> uint { return uint(align_forward_uintptr(uintptr(ptr), uintptr(align))) @@ -230,6 +236,7 @@ align_backward_uintptr :: proc(ptr, align: uintptr) -> uintptr { align_backward_int :: proc(ptr, align: int) -> int { return int(align_backward_uintptr(uintptr(ptr), uintptr(align))) } + @(require_results) align_backward_uint :: proc(ptr, align: uint) -> uint { return uint(align_backward_uintptr(uintptr(ptr), uintptr(align))) @@ -247,7 +254,6 @@ reinterpret_copy :: proc "contextless" ($T: typeid, ptr: rawptr) -> (value: T) { return } - Fixed_Byte_Buffer :: distinct [dynamic]byte @(require_results) @@ -264,8 +270,6 @@ make_fixed_byte_buffer :: proc "contextless" (backing: []byte) -> Fixed_Byte_Buf return transmute(Fixed_Byte_Buffer)d } - - @(require_results) align_formula :: proc "contextless" (size, align: int) -> int { result := size + align-1 @@ -276,12 +280,10 
@@ align_formula :: proc "contextless" (size, align: int) -> int { calc_padding_with_header :: proc "contextless" (ptr: uintptr, align: uintptr, header_size: int) -> int { p, a := ptr, align modulo := p & (a-1) - padding := uintptr(0) if modulo != 0 { padding = a - modulo } - needed_space := uintptr(header_size) if padding < needed_space { needed_space -= padding @@ -296,8 +298,6 @@ calc_padding_with_header :: proc "contextless" (ptr: uintptr, align: uintptr, he return int(padding) } - - @(require_results, deprecated="prefer 'slice.clone'") clone_slice :: proc(slice: $T/[]$E, allocator := context.allocator, loc := #caller_location) -> (new_slice: T) { new_slice, _ = make(T, len(slice), allocator, loc) diff --git a/core/mem/mutex_allocator.odin b/core/mem/mutex_allocator.odin index 591703eab38..1cccc7dacab 100644 --- a/core/mem/mutex_allocator.odin +++ b/core/mem/mutex_allocator.odin @@ -13,7 +13,6 @@ mutex_allocator_init :: proc(m: ^Mutex_Allocator, backing_allocator: Allocator) m.mutex = {} } - @(require_results) mutex_allocator :: proc(m: ^Mutex_Allocator) -> Allocator { return Allocator{ @@ -22,11 +21,16 @@ mutex_allocator :: proc(m: ^Mutex_Allocator) -> Allocator { } } -mutex_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode, - size, alignment: int, - old_memory: rawptr, old_size: int, loc := #caller_location) -> (result: []byte, err: Allocator_Error) { +mutex_allocator_proc :: proc( + allocator_data: rawptr, + mode: Allocator_Mode, + size: int, + alignment: int, + old_memory: rawptr, + old_size: int, + loc := #caller_location, +) -> (result: []byte, err: Allocator_Error) { m := (^Mutex_Allocator)(allocator_data) - sync.mutex_guard(&m.mutex) return m.backing.procedure(m.backing.data, mode, size, alignment, old_memory, old_size, loc) } diff --git a/core/mem/raw.odin b/core/mem/raw.odin index f56206957bb..7fda3229d30 100644 --- a/core/mem/raw.odin +++ b/core/mem/raw.odin @@ -3,22 +3,36 @@ package mem import "base:builtin" import "base:runtime" 
-Raw_Any :: runtime.Raw_Any -Raw_String :: runtime.Raw_String -Raw_Cstring :: runtime.Raw_Cstring -Raw_Slice :: runtime.Raw_Slice +Raw_Any :: runtime.Raw_Any + +Raw_String :: runtime.Raw_String + +Raw_Cstring :: runtime.Raw_Cstring + +Raw_Slice :: runtime.Raw_Slice + Raw_Dynamic_Array :: runtime.Raw_Dynamic_Array -Raw_Map :: runtime.Raw_Map -Raw_Soa_Pointer :: runtime.Raw_Soa_Pointer -Raw_Complex32 :: runtime.Raw_Complex32 -Raw_Complex64 :: runtime.Raw_Complex64 -Raw_Complex128 :: runtime.Raw_Complex128 -Raw_Quaternion64 :: runtime.Raw_Quaternion64 +Raw_Map :: runtime.Raw_Map + +Raw_Soa_Pointer :: runtime.Raw_Soa_Pointer + +Raw_Complex32 :: runtime.Raw_Complex32 + +Raw_Complex64 :: runtime.Raw_Complex64 + +Raw_Complex128 :: runtime.Raw_Complex128 + +Raw_Quaternion64 :: runtime.Raw_Quaternion64 + Raw_Quaternion128 :: runtime.Raw_Quaternion128 + Raw_Quaternion256 :: runtime.Raw_Quaternion256 -Raw_Quaternion64_Vector_Scalar :: runtime.Raw_Quaternion64_Vector_Scalar + +Raw_Quaternion64_Vector_Scalar :: runtime.Raw_Quaternion64_Vector_Scalar + Raw_Quaternion128_Vector_Scalar :: runtime.Raw_Quaternion128_Vector_Scalar + Raw_Quaternion256_Vector_Scalar :: runtime.Raw_Quaternion256_Vector_Scalar make_any :: proc "contextless" (data: rawptr, id: typeid) -> any { diff --git a/core/mem/rollback_stack_allocator.odin b/core/mem/rollback_stack_allocator.odin index f5e428d878f..76143555221 100644 --- a/core/mem/rollback_stack_allocator.odin +++ b/core/mem/rollback_stack_allocator.odin @@ -1,45 +1,47 @@ package mem -// The Rollback Stack Allocator was designed for the test runner to be fast, -// able to grow, and respect the Tracking Allocator's requirement for -// individual frees. It is not overly concerned with fragmentation, however. -// -// It has support for expansion when configured with a block allocator and -// limited support for out-of-order frees. -// -// Allocation has constant-time best and usual case performance. 
-// At worst, it is linear according to the number of memory blocks. -// -// Allocation follows a first-fit strategy when there are multiple memory -// blocks. -// -// Freeing has constant-time best and usual case performance. -// At worst, it is linear according to the number of memory blocks and number -// of freed items preceding the last item in a block. -// -// Resizing has constant-time performance, if it's the last item in a block, or -// the new size is smaller. Naturally, this becomes linear-time if there are -// multiple blocks to search for the pointer's owning block. Otherwise, the -// allocator defaults to a combined alloc & free operation internally. -// -// Out-of-order freeing is accomplished by collapsing a run of freed items -// from the last allocation backwards. -// -// Each allocation has an overhead of 8 bytes and any extra bytes to satisfy -// the requested alignment. +/* +The Rollback Stack Allocator was designed for the test runner to be fast, +able to grow, and respect the Tracking Allocator's requirement for +individual frees. It is not overly concerned with fragmentation, however. +It has support for expansion when configured with a block allocator and +limited support for out-of-order frees. + +Allocation has constant-time best and usual case performance. +At worst, it is linear according to the number of memory blocks. + +Allocation follows a first-fit strategy when there are multiple memory +blocks. + +Freeing has constant-time best and usual case performance. +At worst, it is linear according to the number of memory blocks and number +of freed items preceding the last item in a block. + +Resizing has constant-time performance, if it's the last item in a block, or +the new size is smaller. Naturally, this becomes linear-time if there are +multiple blocks to search for the pointer's owning block. Otherwise, the +allocator defaults to a combined alloc & free operation internally. 
+ +Out-of-order freeing is accomplished by collapsing a run of freed items +from the last allocation backwards. + +Each allocation has an overhead of 8 bytes and any extra bytes to satisfy +the requested alignment. +*/ import "base:runtime" ROLLBACK_STACK_DEFAULT_BLOCK_SIZE :: 4 * Megabyte -// This limitation is due to the size of `prev_ptr`, but it is only for the -// head block; any allocation in excess of the allocator's `block_size` is -// valid, so long as the block allocator can handle it. -// -// This is because allocations over the block size are not split up if the item -// within is freed; they are immediately returned to the block allocator. -ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE :: 2 * Gigabyte +/* +This limitation is due to the size of `prev_ptr`, but it is only for the +head block; any allocation in excess of the allocator's `block_size` is +valid, so long as the block allocator can handle it. +This is because allocations over the block size are not split up if the item +within is freed; they are immediately returned to the block allocator. 
+*/ +ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE :: 2 * Gigabyte Rollback_Stack_Header :: bit_field u64 { prev_offset: uintptr | 32, @@ -60,7 +62,6 @@ Rollback_Stack :: struct { block_allocator: Allocator, } - @(private="file", require_results) rb_ptr_in_bounds :: proc(block: ^Rollback_Stack_Block, ptr: rawptr) -> bool { start := raw_data(block.buffer) @@ -294,9 +295,13 @@ rollback_stack_allocator :: proc(stack: ^Rollback_Stack) -> Allocator { } @(require_results) -rollback_stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode, - size, alignment: int, - old_memory: rawptr, old_size: int, location := #caller_location, +rollback_stack_allocator_proc :: proc( + allocator_data: rawptr, + mode: Allocator_Mode, + size, alignment: int, + old_memory: rawptr, + old_size: int, + location := #caller_location, ) -> (result: []byte, err: Allocator_Error) { stack := cast(^Rollback_Stack)allocator_data diff --git a/core/mem/tracking_allocator.odin b/core/mem/tracking_allocator.odin index 1b57e5fb486..356180be1cf 100644 --- a/core/mem/tracking_allocator.odin +++ b/core/mem/tracking_allocator.odin @@ -12,22 +12,23 @@ Tracking_Allocator_Entry :: struct { err: Allocator_Error, location: runtime.Source_Code_Location, } + Tracking_Allocator_Bad_Free_Entry :: struct { memory: rawptr, location: runtime.Source_Code_Location, } + Tracking_Allocator :: struct { - backing: Allocator, - allocation_map: map[rawptr]Tracking_Allocator_Entry, - bad_free_array: [dynamic]Tracking_Allocator_Bad_Free_Entry, - mutex: sync.Mutex, + backing: Allocator, + allocation_map: map[rawptr]Tracking_Allocator_Entry, + bad_free_array: [dynamic]Tracking_Allocator_Bad_Free_Entry, + mutex: sync.Mutex, clear_on_free_all: bool, - - total_memory_allocated: i64, - total_allocation_count: i64, - total_memory_freed: i64, - total_free_count: i64, - peak_memory_allocated: i64, + total_memory_allocated: i64, + total_allocation_count: i64, + total_memory_freed: i64, + total_free_count: i64, + peak_memory_allocated: 
i64, current_memory_allocated: i64, } @@ -35,7 +36,6 @@ tracking_allocator_init :: proc(t: ^Tracking_Allocator, backing_allocator: Alloc t.backing = backing_allocator t.allocation_map.allocator = internals_allocator t.bad_free_array.allocator = internals_allocator - if .Free_All in query_features(t.backing) { t.clear_on_free_all = true } @@ -46,7 +46,6 @@ tracking_allocator_destroy :: proc(t: ^Tracking_Allocator) { delete(t.bad_free_array) } - // Clear only the current allocation data while keeping the totals intact. tracking_allocator_clear :: proc(t: ^Tracking_Allocator) { sync.mutex_lock(&t.mutex) @@ -78,9 +77,14 @@ tracking_allocator :: proc(data: ^Tracking_Allocator) -> Allocator { } } -tracking_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode, - size, alignment: int, - old_memory: rawptr, old_size: int, loc := #caller_location) -> (result: []byte, err: Allocator_Error) { +tracking_allocator_proc :: proc( + allocator_data: rawptr, + mode: Allocator_Mode, + size, alignment: int, + old_memory: rawptr, + old_size: int, + loc := #caller_location, +) -> (result: []byte, err: Allocator_Error) { track_alloc :: proc(data: ^Tracking_Allocator, entry: ^Tracking_Allocator_Entry) { data.total_memory_allocated += i64(entry.size) data.total_allocation_count += 1 From da6213196dd4c64cf53d163dd7392531e26f3ad2 Mon Sep 17 00:00:00 2001 From: flysand7 Date: Sat, 7 Sep 2024 09:42:04 +1100 Subject: [PATCH 02/35] [mem]: API for using arena directly --- core/mem/allocators.odin | 71 +++++++++++++++++++++------------------- 1 file changed, 38 insertions(+), 33 deletions(-) diff --git a/core/mem/allocators.odin b/core/mem/allocators.odin index 7bc1a6d77d2..52a05958f68 100644 --- a/core/mem/allocators.odin +++ b/core/mem/allocators.odin @@ -3,6 +3,13 @@ package mem import "base:intrinsics" import "base:runtime" +nil_allocator :: proc() -> Allocator { + return Allocator{ + procedure = nil_allocator_proc, + data = nil, + } +} + nil_allocator_proc :: proc( 
allocator_data: rawptr, mode: Allocator_Mode, @@ -14,13 +21,6 @@ nil_allocator_proc :: proc( return nil, nil } -nil_allocator :: proc() -> Allocator { - return Allocator{ - procedure = nil_allocator_proc, - data = nil, - } -} - Arena :: struct { data: []byte, offset: int, @@ -56,6 +56,30 @@ arena_allocator :: proc(arena: ^Arena) -> Allocator { } } +arena_alloc_non_zeroed :: proc(a: ^Arena, size: int, alignment := DEFAULT_ALIGNMENT) -> ([]byte, Allocator_Error) { + #no_bounds_check end := &a.data[a.offset] + ptr := align_forward(end, uintptr(alignment)) + total_size := size + ptr_sub((^byte)(ptr), (^byte)(end)) + if a.offset + total_size > len(a.data) { + return nil, .Out_Of_Memory + } + a.offset += total_size + a.peak_used = max(a.peak_used, a.offset) + return byte_slice(ptr, size), nil +} + +arena_alloc :: proc(a: ^Arena, size: int, alignment := DEFAULT_ALIGNMENT) -> ([]byte, Allocator_Error) { + bytes, err := arena_alloc_non_zeroed(a, size, alignment) + if bytes != nil { + zero(raw_data(bytes), size) + } + return bytes, err +} + +arena_free_all :: proc(a: ^Arena) { + a.offset = 0 +} + arena_allocator_proc :: proc( allocator_data: rawptr, mode: Allocator_Mode, @@ -66,49 +90,28 @@ arena_allocator_proc :: proc( location := #caller_location, ) -> ([]byte, Allocator_Error) { arena := cast(^Arena)allocator_data - switch mode { - case .Alloc, .Alloc_Non_Zeroed: - #no_bounds_check end := &arena.data[arena.offset] - - ptr := align_forward(end, uintptr(alignment)) - - total_size := size + ptr_sub((^byte)(ptr), (^byte)(end)) - - if arena.offset + total_size > len(arena.data) { - return nil, .Out_Of_Memory - } - - arena.offset += total_size - arena.peak_used = max(arena.peak_used, arena.offset) - if mode != .Alloc_Non_Zeroed { - zero(ptr, size) - } - return byte_slice(ptr, size), nil - + case .Alloc: + return arena_alloc(arena, size, alignment) + case .Alloc_Non_Zeroed: + return arena_alloc_non_zeroed(arena, size, alignment) case .Free: return nil, .Mode_Not_Implemented - 
case .Free_All: - arena.offset = 0 - + arena_free_all(arena) case .Resize: return default_resize_bytes_align(byte_slice(old_memory, old_size), size, alignment, arena_allocator(arena)) - case .Resize_Non_Zeroed: return default_resize_bytes_align_non_zeroed(byte_slice(old_memory, old_size), size, alignment, arena_allocator(arena)) - case .Query_Features: set := (^Allocator_Mode_Set)(old_memory) if set != nil { set^ = {.Alloc, .Alloc_Non_Zeroed, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features} } return nil, nil - case .Query_Info: return nil, .Mode_Not_Implemented } - return nil, nil } @@ -128,6 +131,8 @@ end_arena_temp_memory :: proc(tmp: Arena_Temp_Memory) { tmp.arena.temp_count -= 1 } + + Scratch_Allocator :: struct { data: []byte, curr_offset: int, From e5106e48a809a313be35c3554e9dc310c117eefe Mon Sep 17 00:00:00 2001 From: flysand7 Date: Sat, 7 Sep 2024 10:09:05 +1100 Subject: [PATCH 03/35] [mem]: API for using scratch allocator directly --- core/mem/allocators.odin | 285 ++++++++++++++++++++++----------------- 1 file changed, 165 insertions(+), 120 deletions(-) diff --git a/core/mem/allocators.odin b/core/mem/allocators.odin index 52a05958f68..2be4d5b6131 100644 --- a/core/mem/allocators.odin +++ b/core/mem/allocators.odin @@ -56,6 +56,14 @@ arena_allocator :: proc(arena: ^Arena) -> Allocator { } } +arena_alloc :: proc(a: ^Arena, size: int, alignment := DEFAULT_ALIGNMENT) -> ([]byte, Allocator_Error) { + bytes, err := arena_alloc_non_zeroed(a, size, alignment) + if bytes != nil { + zero_slice(bytes) + } + return bytes, err +} + arena_alloc_non_zeroed :: proc(a: ^Arena, size: int, alignment := DEFAULT_ALIGNMENT) -> ([]byte, Allocator_Error) { #no_bounds_check end := &a.data[a.offset] ptr := align_forward(end, uintptr(alignment)) @@ -68,14 +76,6 @@ arena_alloc_non_zeroed :: proc(a: ^Arena, size: int, alignment := DEFAULT_ALIGNM return byte_slice(ptr, size), nil } -arena_alloc :: proc(a: ^Arena, size: int, alignment := DEFAULT_ALIGNMENT) -> ([]byte, 
Allocator_Error) { - bytes, err := arena_alloc_non_zeroed(a, size, alignment) - if bytes != nil { - zero(raw_data(bytes), size) - } - return bytes, err -} - arena_free_all :: proc(a: ^Arena) { a.offset = 0 } @@ -162,6 +162,153 @@ scratch_allocator_destroy :: proc(s: ^Scratch_Allocator) { s^ = {} } +scratch_allocator_alloc :: proc( + s: ^Scratch_Allocator, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { + bytes, err := scratch_allocator_alloc_non_zeroed(s, size, alignment, loc) + if bytes != nil { + zero_slice(bytes) + } + return bytes, err +} + +scratch_allocator_alloc_non_zeroed :: proc( + s: ^Scratch_Allocator, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { + if s.data == nil { + DEFAULT_BACKING_SIZE :: 4 * Megabyte + if !(context.allocator.procedure != scratch_allocator_proc && context.allocator.data != s) { + panic("cyclic initialization of the scratch allocator with itself", loc) + } + scratch_allocator_init(s, DEFAULT_BACKING_SIZE) + } + size := size + size = align_forward_int(size, alignment) + switch { + case s.curr_offset+size <= len(s.data): + start := uintptr(raw_data(s.data)) + ptr := start + uintptr(s.curr_offset) + ptr = align_forward_uintptr(ptr, uintptr(alignment)) + s.prev_allocation = rawptr(ptr) + offset := int(ptr - start) + s.curr_offset = offset + size + return byte_slice(rawptr(ptr), size), nil + case size <= len(s.data): + start := uintptr(raw_data(s.data)) + ptr := align_forward_uintptr(start, uintptr(alignment)) + s.prev_allocation = rawptr(ptr) + offset := int(ptr - start) + s.curr_offset = offset + size + return byte_slice(rawptr(ptr), size), nil + } + a := s.backup_allocator + if a.procedure == nil { + a = context.allocator + s.backup_allocator = a + } + ptr, err := alloc_bytes_non_zeroed(size, alignment, a, loc) + if err != nil { + return ptr, err + } + if s.leaked_allocations == nil { + s.leaked_allocations, err = 
make([dynamic][]byte, a) + } + append(&s.leaked_allocations, ptr) + if logger := context.logger; logger.lowest_level <= .Warning { + if logger.procedure != nil { + logger.procedure(logger.data, .Warning, "mem.Scratch_Allocator resorted to backup_allocator" , logger.options, loc) + } + } + return ptr, err +} + +scratch_allocator_free :: proc(s: ^Scratch_Allocator, ptr: rawptr, loc := #caller_location) -> Allocator_Error { + if s.data == nil { + panic("Free on an uninitialized scratch allocator", loc) + } + if ptr == nil { + return nil + } + start := uintptr(raw_data(s.data)) + end := start + uintptr(len(s.data)) + old_ptr := uintptr(ptr) + if s.prev_allocation == ptr { + s.curr_offset = int(uintptr(s.prev_allocation) - start) + s.prev_allocation = nil + return nil + } + if start <= old_ptr && old_ptr < end { + // NOTE(bill): Cannot free this pointer but it is valid + return nil + } + if len(s.leaked_allocations) != 0 { + for data, i in s.leaked_allocations { + ptr := raw_data(data) + if ptr == ptr { + free_bytes(data, s.backup_allocator, loc) + ordered_remove(&s.leaked_allocations, i, loc) + return nil + } + } + } + return .Invalid_Pointer +} + +scratch_allocator_free_all :: proc(s: ^Scratch_Allocator, loc := #caller_location) { + s.curr_offset = 0 + s.prev_allocation = nil + for ptr in s.leaked_allocations { + free_bytes(ptr, s.backup_allocator, loc) + } + clear(&s.leaked_allocations) +} + +scratch_allocator_resize_non_zeroed :: proc( + s: ^Scratch_Allocator, + old_memory: rawptr, + old_size: int, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location +) -> ([]byte, Allocator_Error) { + begin := uintptr(raw_data(s.data)) + end := begin + uintptr(len(s.data)) + old_ptr := uintptr(old_memory) + if begin <= old_ptr && old_ptr < end && old_ptr+uintptr(size) < end { + s.curr_offset = int(old_ptr-begin)+size + return byte_slice(old_memory, size), nil + } + data, err := scratch_allocator_alloc_non_zeroed(s, size, alignment, loc) + if err != nil { + 
return data, err + } + // TODO(flysand): OOB access on size < old_size. + runtime.copy(data, byte_slice(old_memory, old_size)) + err = scratch_allocator_free(s, old_memory, loc) + return data, err +} + +scratch_allocator_resize :: proc( + s: ^Scratch_Allocator, + old_memory: rawptr, + old_size: int, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location +) -> ([]byte, Allocator_Error) { + bytes, err := scratch_allocator_resize_non_zeroed(s, old_memory, old_size, size, alignment, loc) + if bytes != nil && size > old_size { + zero_slice(bytes[size:]) + } + return bytes, err +} + scratch_allocator_proc :: proc( allocator_data: rawptr, mode: Allocator_Mode, @@ -170,9 +317,7 @@ scratch_allocator_proc :: proc( old_size: int, loc := #caller_location, ) -> ([]byte, Allocator_Error) { - s := (^Scratch_Allocator)(allocator_data) - if s.data == nil { DEFAULT_BACKING_SIZE :: 4 * Megabyte if !(context.allocator.procedure != scratch_allocator_proc && @@ -181,129 +326,29 @@ scratch_allocator_proc :: proc( } scratch_allocator_init(s, DEFAULT_BACKING_SIZE) } - size := size - switch mode { - case .Alloc, .Alloc_Non_Zeroed: - size = align_forward_int(size, alignment) - - switch { - case s.curr_offset+size <= len(s.data): - start := uintptr(raw_data(s.data)) - ptr := start + uintptr(s.curr_offset) - ptr = align_forward_uintptr(ptr, uintptr(alignment)) - if mode != .Alloc_Non_Zeroed { - zero(rawptr(ptr), size) - } - - s.prev_allocation = rawptr(ptr) - offset := int(ptr - start) - s.curr_offset = offset + size - return byte_slice(rawptr(ptr), size), nil - - case size <= len(s.data): - start := uintptr(raw_data(s.data)) - ptr := align_forward_uintptr(start, uintptr(alignment)) - if mode != .Alloc_Non_Zeroed { - zero(rawptr(ptr), size) - } - - s.prev_allocation = rawptr(ptr) - offset := int(ptr - start) - s.curr_offset = offset + size - return byte_slice(rawptr(ptr), size), nil - } - a := s.backup_allocator - if a.procedure == nil { - a = context.allocator - 
s.backup_allocator = a - } - - ptr, err := alloc_bytes(size, alignment, a, loc) - if err != nil { - return ptr, err - } - if s.leaked_allocations == nil { - s.leaked_allocations, err = make([dynamic][]byte, a) - } - append(&s.leaked_allocations, ptr) - - if logger := context.logger; logger.lowest_level <= .Warning { - if logger.procedure != nil { - logger.procedure(logger.data, .Warning, "mem.Scratch_Allocator resorted to backup_allocator" , logger.options, loc) - } - } - - return ptr, err - + case .Alloc: + return scratch_allocator_alloc(s, size, alignment, loc) + case .Alloc_Non_Zeroed: + return scratch_allocator_alloc_non_zeroed(s, size, alignment, loc) case .Free: - if old_memory == nil { - return nil, nil - } - start := uintptr(raw_data(s.data)) - end := start + uintptr(len(s.data)) - old_ptr := uintptr(old_memory) - - if s.prev_allocation == old_memory { - s.curr_offset = int(uintptr(s.prev_allocation) - start) - s.prev_allocation = nil - return nil, nil - } - - if start <= old_ptr && old_ptr < end { - // NOTE(bill): Cannot free this pointer but it is valid - return nil, nil - } - - if len(s.leaked_allocations) != 0 { - for data, i in s.leaked_allocations { - ptr := raw_data(data) - if ptr == old_memory { - free_bytes(data, s.backup_allocator) - ordered_remove(&s.leaked_allocations, i) - return nil, nil - } - } - } - return nil, .Invalid_Pointer - // panic("invalid pointer passed to default_temp_allocator"); - + return nil, scratch_allocator_free(s, old_memory, loc) case .Free_All: - s.curr_offset = 0 - s.prev_allocation = nil - for ptr in s.leaked_allocations { - free_bytes(ptr, s.backup_allocator) - } - clear(&s.leaked_allocations) - - case .Resize, .Resize_Non_Zeroed: - begin := uintptr(raw_data(s.data)) - end := begin + uintptr(len(s.data)) - old_ptr := uintptr(old_memory) - if begin <= old_ptr && old_ptr < end && old_ptr+uintptr(size) < end { - s.curr_offset = int(old_ptr-begin)+size - return byte_slice(old_memory, size), nil - } - data, err := 
scratch_allocator_proc(allocator_data, .Alloc, size, alignment, old_memory, old_size, loc) - if err != nil { - return data, err - } - runtime.copy(data, byte_slice(old_memory, old_size)) - _, err = scratch_allocator_proc(allocator_data, .Free, 0, alignment, old_memory, old_size, loc) - return data, err - + scratch_allocator_free_all(s, loc) + case .Resize: + return scratch_allocator_resize(s, old_memory, old_size, size, alignment, loc) + case .Resize_Non_Zeroed: + return scratch_allocator_resize_non_zeroed(s, old_memory, old_size, size, alignment, loc) case .Query_Features: set := (^Allocator_Mode_Set)(old_memory) if set != nil { set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features} } return nil, nil - case .Query_Info: return nil, .Mode_Not_Implemented } - return nil, nil } From 834f082dbabb807e01f483c6a4d61a51a4dad47c Mon Sep 17 00:00:00 2001 From: flysand7 Date: Sat, 7 Sep 2024 10:24:00 +1100 Subject: [PATCH 04/35] [mem]: Initialize scratch allocator during calls to free and resize --- core/mem/allocators.odin | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/core/mem/allocators.odin b/core/mem/allocators.odin index 2be4d5b6131..4ffb02085f0 100644 --- a/core/mem/allocators.odin +++ b/core/mem/allocators.odin @@ -261,6 +261,9 @@ scratch_allocator_free :: proc(s: ^Scratch_Allocator, ptr: rawptr, loc := #calle } scratch_allocator_free_all :: proc(s: ^Scratch_Allocator, loc := #caller_location) { + if s.data == nil { + panic("free_all called on an unitialized scratch allocator", loc) + } s.curr_offset = 0 s.prev_allocation = nil for ptr in s.leaked_allocations { @@ -277,6 +280,13 @@ scratch_allocator_resize_non_zeroed :: proc( alignment := DEFAULT_ALIGNMENT, loc := #caller_location ) -> ([]byte, Allocator_Error) { + if s.data == nil { + DEFAULT_BACKING_SIZE :: 4 * Megabyte + if !(context.allocator.procedure != scratch_allocator_proc && context.allocator.data != s) { + panic("cyclic 
initialization of the scratch allocator with itself", loc) + } + scratch_allocator_init(s, DEFAULT_BACKING_SIZE) + } begin := uintptr(raw_data(s.data)) end := begin + uintptr(len(s.data)) old_ptr := uintptr(old_memory) @@ -318,14 +328,6 @@ scratch_allocator_proc :: proc( loc := #caller_location, ) -> ([]byte, Allocator_Error) { s := (^Scratch_Allocator)(allocator_data) - if s.data == nil { - DEFAULT_BACKING_SIZE :: 4 * Megabyte - if !(context.allocator.procedure != scratch_allocator_proc && - context.allocator.data != allocator_data) { - panic("cyclic initialization of the scratch allocator with itself") - } - scratch_allocator_init(s, DEFAULT_BACKING_SIZE) - } size := size switch mode { case .Alloc: From 9750b64096024990ee84b5727d6db34ffc686948 Mon Sep 17 00:00:00 2001 From: flysand7 Date: Sat, 7 Sep 2024 10:55:54 +1100 Subject: [PATCH 05/35] [mem]: API for using stack allocator directly --- core/mem/allocators.odin | 283 +++++++++++++++++++++++---------------- 1 file changed, 169 insertions(+), 114 deletions(-) diff --git a/core/mem/allocators.odin b/core/mem/allocators.odin index 4ffb02085f0..bade70ce089 100644 --- a/core/mem/allocators.odin +++ b/core/mem/allocators.odin @@ -289,6 +289,7 @@ scratch_allocator_resize_non_zeroed :: proc( } begin := uintptr(raw_data(s.data)) end := begin + uintptr(len(s.data)) + // TODO(flysand): Doesn't handle old_memory == nil old_ptr := uintptr(old_memory) if begin <= old_ptr && old_ptr < end && old_ptr+uintptr(size) < end { s.curr_offset = int(old_ptr-begin)+size @@ -362,10 +363,7 @@ scratch_allocator :: proc(allocator: ^Scratch_Allocator) -> Allocator { } } -Stack_Allocation_Header :: struct { - prev_offset: int, - padding: int, -} + // Stack is a stack-like allocator which has a strict memory freeing order Stack :: struct { @@ -375,6 +373,11 @@ Stack :: struct { peak_used: int, } +Stack_Allocation_Header :: struct { + prev_offset: int, + padding: int, +} + stack_init :: proc(s: ^Stack, data: []byte) { s.data = data 
s.prev_offset = 0 @@ -398,6 +401,156 @@ stack_allocator :: proc(stack: ^Stack) -> Allocator { } } +stack_allocator_alloc_non_zeroed :: proc( + s: ^Stack, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location +) -> ([]byte, Allocator_Error) { + if s.data == nil { + panic("Stack allocation on an uninitialized stack allocator", loc) + } + curr_addr := uintptr(raw_data(s.data)) + uintptr(s.curr_offset) + padding := calc_padding_with_header( + curr_addr, + uintptr(alignment), + size_of(Stack_Allocation_Header), + ) + if s.curr_offset + padding + size > len(s.data) { + return nil, .Out_Of_Memory + } + s.prev_offset = s.curr_offset + s.curr_offset += padding + next_addr := curr_addr + uintptr(padding) + header := (^Stack_Allocation_Header)(next_addr - size_of(Stack_Allocation_Header)) + header.padding = padding + header.prev_offset = s.prev_offset + s.curr_offset += size + s.peak_used = max(s.peak_used, s.curr_offset) + return byte_slice(rawptr(next_addr), size), nil +} + +stack_allocator_alloc :: proc( + s: ^Stack, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location +) -> ([]byte, Allocator_Error) { + bytes, err := stack_allocator_alloc_non_zeroed(s, size, alignment, loc) + if bytes != nil { + zero_slice(bytes) + } + return bytes, err +} + +stack_allocator_free :: proc( + s: ^Stack, + old_memory: rawptr, + loc := #caller_location, +) -> (Allocator_Error) { + if s.data == nil { + panic("Stack free on an uninitialized stack allocator", loc) + } + if old_memory == nil { + return nil + } + start := uintptr(raw_data(s.data)) + end := start + uintptr(len(s.data)) + curr_addr := uintptr(old_memory) + if !(start <= curr_addr && curr_addr < end) { + panic("Out of bounds memory address passed to stack allocator (free)", loc) + } + if curr_addr >= start+uintptr(s.curr_offset) { + // NOTE(bill): Allow double frees + return nil + } + header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header)) + old_offset := int(curr_addr 
- uintptr(header.padding) - uintptr(raw_data(s.data))) + if old_offset != header.prev_offset { + // panic("Out of order stack allocator free"); + return .Invalid_Pointer + } + s.curr_offset = old_offset + s.prev_offset = header.prev_offset + return nil +} + +stack_allocator_free_all :: proc(s: ^Stack) { + if s.data == nil { + panic("Stack free all on an uninitialized stack allocator", loc) + } + s.prev_offset = 0 + s.curr_offset = 0 +} + +stack_allocator_resize_non_zeroed :: proc( + s: ^Stack, + old_memory: rawptr, + old_size: int, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { + if s.data == nil { + panic("Stack free all on an uninitialized stack allocator", loc) + } + if old_memory == nil { + return stack_allocator_alloc_non_zeroed(s, size, alignment, loc) + } + if size == 0 { + return nil, nil + } + start := uintptr(raw_data(s.data)) + end := start + uintptr(len(s.data)) + curr_addr := uintptr(old_memory) + if !(start <= curr_addr && curr_addr < end) { + panic("Out of bounds memory address passed to stack allocator (resize)") + } + if curr_addr >= start+uintptr(s.curr_offset) { + // NOTE(bill): Allow double frees + return nil, nil + } + if old_size == size { + return byte_slice(old_memory, size), nil + } + header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header)) + old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data))) + if old_offset != header.prev_offset { + data, err := stack_allocator_alloc_non_zeroed(s, size, alignment, loc) + if err == nil { + runtime.copy(data, byte_slice(old_memory, old_size)) + } + return data, err + } + old_memory_size := uintptr(s.curr_offset) - (curr_addr - start) + assert(old_memory_size == uintptr(old_size)) + diff := size - old_size + s.curr_offset += diff // works for smaller sizes too + if diff > 0 { + zero(rawptr(curr_addr + uintptr(diff)), diff) + } + return byte_slice(old_memory, size), nil +} + 
+stack_allocator_resize :: proc( + s: ^Stack, + old_memory: rawptr, + old_size: int, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { + bytes, err := stack_allocator_alloc_non_zeroed(s, size, alignment, loc) + if bytes != nil { + if old_memory == nil { + zero_slice(bytes) + } else if size > old_size { + zero_slice(bytes[old_size:]) + } + } + return bytes, err +} + stack_allocator_proc :: proc( allocator_data: rawptr, mode: Allocator_Mode, @@ -408,121 +561,22 @@ stack_allocator_proc :: proc( location := #caller_location, ) -> ([]byte, Allocator_Error) { s := cast(^Stack)allocator_data - if s.data == nil { return nil, .Invalid_Argument } - - raw_alloc :: proc( - s: ^Stack, - size: int, - alignment: int, - zero_memory: bool, - ) -> ([]byte, Allocator_Error) { - curr_addr := uintptr(raw_data(s.data)) + uintptr(s.curr_offset) - padding := calc_padding_with_header( - curr_addr, - uintptr(alignment), - size_of(Stack_Allocation_Header), - ) - if s.curr_offset + padding + size > len(s.data) { - return nil, .Out_Of_Memory - } - s.prev_offset = s.curr_offset - s.curr_offset += padding - next_addr := curr_addr + uintptr(padding) - header := (^Stack_Allocation_Header)(next_addr - size_of(Stack_Allocation_Header)) - header.padding = padding - header.prev_offset = s.prev_offset - s.curr_offset += size - s.peak_used = max(s.peak_used, s.curr_offset) - if zero_memory { - zero(rawptr(next_addr), size) - } - return byte_slice(rawptr(next_addr), size), nil - } - switch mode { - case .Alloc, .Alloc_Non_Zeroed: - return raw_alloc(s, size, alignment, mode == .Alloc) + case .Alloc: + return stack_allocator_alloc(s, size, alignment, loc) + case .Alloc_Non_Zeroed: + return stack_allocator_alloc_non_zeroed(s, size, alignment, loc) case .Free: - if old_memory == nil { - return nil, nil - } - start := uintptr(raw_data(s.data)) - end := start + uintptr(len(s.data)) - curr_addr := uintptr(old_memory) - - if !(start <= curr_addr && 
curr_addr < end) { - panic("Out of bounds memory address passed to stack allocator (free)") - } - - if curr_addr >= start+uintptr(s.curr_offset) { - // NOTE(bill): Allow double frees - return nil, nil - } - - header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header)) - old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data))) - - if old_offset != header.prev_offset { - // panic("Out of order stack allocator free"); - return nil, .Invalid_Pointer - } - - s.curr_offset = old_offset - s.prev_offset = header.prev_offset - + return nil, stack_allocator_free(s, old_memory, loc) case .Free_All: - s.prev_offset = 0 - s.curr_offset = 0 - - case .Resize, .Resize_Non_Zeroed: - if old_memory == nil { - return raw_alloc(s, size, alignment, mode == .Resize) - } - if size == 0 { - return nil, nil - } - - start := uintptr(raw_data(s.data)) - end := start + uintptr(len(s.data)) - curr_addr := uintptr(old_memory) - if !(start <= curr_addr && curr_addr < end) { - panic("Out of bounds memory address passed to stack allocator (resize)") - } - - if curr_addr >= start+uintptr(s.curr_offset) { - // NOTE(bill): Allow double frees - return nil, nil - } - - if old_size == size { - return byte_slice(old_memory, size), nil - } - - header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header)) - old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data))) - - if old_offset != header.prev_offset { - data, err := raw_alloc(s, size, alignment, mode == .Resize) - if err == nil { - runtime.copy(data, byte_slice(old_memory, old_size)) - } - return data, err - } - - old_memory_size := uintptr(s.curr_offset) - (curr_addr - start) - assert(old_memory_size == uintptr(old_size)) - - diff := size - old_size - s.curr_offset += diff // works for smaller sizes too - if diff > 0 { - zero(rawptr(curr_addr + uintptr(diff)), diff) - } - - return byte_slice(old_memory, size), nil - + stack_allocator_free_all(s) + case 
.Resize: + return stack_allocator_resize(s, old_memory, old_size, size, alignment, loc) + case .Resize_Non_Zeroed: + return stack_allocator_resize_non_zeroed(s, old_memory, old_size, size, alignment, loc) case .Query_Features: set := (^Allocator_Mode_Set)(old_memory) if set != nil { @@ -532,10 +586,11 @@ stack_allocator_proc :: proc( case .Query_Info: return nil, .Mode_Not_Implemented } - return nil, nil } + + Small_Stack_Allocation_Header :: struct { padding: u8, } From de220a9aa5382b50a828eef42eb8e5895909b661 Mon Sep 17 00:00:00 2001 From: flysand7 Date: Sat, 7 Sep 2024 11:06:59 +1100 Subject: [PATCH 06/35] [mem]: Remove the extra word 'allocator' in procedures --- core/mem/allocators.odin | 95 +++++++++++++++++++++------------------- 1 file changed, 49 insertions(+), 46 deletions(-) diff --git a/core/mem/allocators.odin b/core/mem/allocators.odin index bade70ce089..97c6d03c93f 100644 --- a/core/mem/allocators.odin +++ b/core/mem/allocators.odin @@ -131,9 +131,12 @@ end_arena_temp_memory :: proc(tmp: Arena_Temp_Memory) { tmp.arena.temp_count -= 1 } +/* old procedures */ +Scratch_Allocator :: Scratch +scratch_allocator_init :: scratch_init +scratch_allocator_destroy :: scratch_destroy - -Scratch_Allocator :: struct { +Scratch :: struct { data: []byte, curr_offset: int, prev_allocation: rawptr, @@ -141,7 +144,7 @@ Scratch_Allocator :: struct { leaked_allocations: [dynamic][]byte, } -scratch_allocator_init :: proc(s: ^Scratch_Allocator, size: int, backup_allocator := context.allocator) -> Allocator_Error { +scratch_init :: proc(s: ^Scratch, size: int, backup_allocator := context.allocator) -> Allocator_Error { s.data = make_aligned([]byte, size, 2*align_of(rawptr), backup_allocator) or_return s.curr_offset = 0 s.prev_allocation = nil @@ -150,7 +153,7 @@ scratch_allocator_init :: proc(s: ^Scratch_Allocator, size: int, backup_allocato return nil } -scratch_allocator_destroy :: proc(s: ^Scratch_Allocator) { +scratch_destroy :: proc(s: ^Scratch) { if s == nil { return } 
@@ -162,21 +165,21 @@ scratch_allocator_destroy :: proc(s: ^Scratch_Allocator) { s^ = {} } -scratch_allocator_alloc :: proc( - s: ^Scratch_Allocator, +scratch_alloc :: proc( + s: ^Scratch, size: int, alignment := DEFAULT_ALIGNMENT, loc := #caller_location, ) -> ([]byte, Allocator_Error) { - bytes, err := scratch_allocator_alloc_non_zeroed(s, size, alignment, loc) + bytes, err := scratch_alloc_non_zeroed(s, size, alignment, loc) if bytes != nil { zero_slice(bytes) } return bytes, err } -scratch_allocator_alloc_non_zeroed :: proc( - s: ^Scratch_Allocator, +scratch_alloc_non_zeroed :: proc( + s: ^Scratch, size: int, alignment := DEFAULT_ALIGNMENT, loc := #caller_location, @@ -186,7 +189,7 @@ scratch_allocator_alloc_non_zeroed :: proc( if !(context.allocator.procedure != scratch_allocator_proc && context.allocator.data != s) { panic("cyclic initialization of the scratch allocator with itself", loc) } - scratch_allocator_init(s, DEFAULT_BACKING_SIZE) + scratch_init(s, DEFAULT_BACKING_SIZE) } size := size size = align_forward_int(size, alignment) @@ -222,13 +225,13 @@ scratch_allocator_alloc_non_zeroed :: proc( append(&s.leaked_allocations, ptr) if logger := context.logger; logger.lowest_level <= .Warning { if logger.procedure != nil { - logger.procedure(logger.data, .Warning, "mem.Scratch_Allocator resorted to backup_allocator" , logger.options, loc) + logger.procedure(logger.data, .Warning, "mem.Scratch resorted to backup_allocator" , logger.options, loc) } } return ptr, err } -scratch_allocator_free :: proc(s: ^Scratch_Allocator, ptr: rawptr, loc := #caller_location) -> Allocator_Error { +scratch_free :: proc(s: ^Scratch, ptr: rawptr, loc := #caller_location) -> Allocator_Error { if s.data == nil { panic("Free on an uninitialized scratch allocator", loc) } @@ -260,7 +263,7 @@ scratch_allocator_free :: proc(s: ^Scratch_Allocator, ptr: rawptr, loc := #calle return .Invalid_Pointer } -scratch_allocator_free_all :: proc(s: ^Scratch_Allocator, loc := #caller_location) { 
+scratch_free_all :: proc(s: ^Scratch, loc := #caller_location) { if s.data == nil { panic("free_all called on an unitialized scratch allocator", loc) } @@ -272,8 +275,8 @@ scratch_allocator_free_all :: proc(s: ^Scratch_Allocator, loc := #caller_locatio clear(&s.leaked_allocations) } -scratch_allocator_resize_non_zeroed :: proc( - s: ^Scratch_Allocator, +scratch_resize_non_zeroed :: proc( + s: ^Scratch, old_memory: rawptr, old_size: int, size: int, @@ -285,7 +288,7 @@ scratch_allocator_resize_non_zeroed :: proc( if !(context.allocator.procedure != scratch_allocator_proc && context.allocator.data != s) { panic("cyclic initialization of the scratch allocator with itself", loc) } - scratch_allocator_init(s, DEFAULT_BACKING_SIZE) + scratch_init(s, DEFAULT_BACKING_SIZE) } begin := uintptr(raw_data(s.data)) end := begin + uintptr(len(s.data)) @@ -295,25 +298,25 @@ scratch_allocator_resize_non_zeroed :: proc( s.curr_offset = int(old_ptr-begin)+size return byte_slice(old_memory, size), nil } - data, err := scratch_allocator_alloc_non_zeroed(s, size, alignment, loc) + data, err := scratch_alloc_non_zeroed(s, size, alignment, loc) if err != nil { return data, err } // TODO(flysand): OOB access on size < old_size. 
runtime.copy(data, byte_slice(old_memory, old_size)) - err = scratch_allocator_free(s, old_memory, loc) + err = scratch_free(s, old_memory, loc) return data, err } -scratch_allocator_resize :: proc( - s: ^Scratch_Allocator, +scratch_resize :: proc( + s: ^Scratch, old_memory: rawptr, old_size: int, size: int, alignment := DEFAULT_ALIGNMENT, loc := #caller_location ) -> ([]byte, Allocator_Error) { - bytes, err := scratch_allocator_resize_non_zeroed(s, old_memory, old_size, size, alignment, loc) + bytes, err := scratch_resize_non_zeroed(s, old_memory, old_size, size, alignment, loc) if bytes != nil && size > old_size { zero_slice(bytes[size:]) } @@ -328,21 +331,21 @@ scratch_allocator_proc :: proc( old_size: int, loc := #caller_location, ) -> ([]byte, Allocator_Error) { - s := (^Scratch_Allocator)(allocator_data) + s := (^Scratch)(allocator_data) size := size switch mode { case .Alloc: - return scratch_allocator_alloc(s, size, alignment, loc) + return scratch_alloc(s, size, alignment, loc) case .Alloc_Non_Zeroed: - return scratch_allocator_alloc_non_zeroed(s, size, alignment, loc) + return scratch_alloc_non_zeroed(s, size, alignment, loc) case .Free: - return nil, scratch_allocator_free(s, old_memory, loc) + return nil, scratch_free(s, old_memory, loc) case .Free_All: - scratch_allocator_free_all(s, loc) + scratch_free_all(s, loc) case .Resize: - return scratch_allocator_resize(s, old_memory, old_size, size, alignment, loc) + return scratch_resize(s, old_memory, old_size, size, alignment, loc) case .Resize_Non_Zeroed: - return scratch_allocator_resize_non_zeroed(s, old_memory, old_size, size, alignment, loc) + return scratch_resize_non_zeroed(s, old_memory, old_size, size, alignment, loc) case .Query_Features: set := (^Allocator_Mode_Set)(old_memory) if set != nil { @@ -356,7 +359,7 @@ scratch_allocator_proc :: proc( } @(require_results) -scratch_allocator :: proc(allocator: ^Scratch_Allocator) -> Allocator { +scratch_allocator :: proc(allocator: ^Scratch) -> 
Allocator { return Allocator{ procedure = scratch_allocator_proc, data = allocator, @@ -401,7 +404,7 @@ stack_allocator :: proc(stack: ^Stack) -> Allocator { } } -stack_allocator_alloc_non_zeroed :: proc( +stack_alloc_non_zeroed :: proc( s: ^Stack, size: int, alignment := DEFAULT_ALIGNMENT, @@ -430,20 +433,20 @@ stack_allocator_alloc_non_zeroed :: proc( return byte_slice(rawptr(next_addr), size), nil } -stack_allocator_alloc :: proc( +stack_alloc :: proc( s: ^Stack, size: int, alignment := DEFAULT_ALIGNMENT, loc := #caller_location ) -> ([]byte, Allocator_Error) { - bytes, err := stack_allocator_alloc_non_zeroed(s, size, alignment, loc) + bytes, err := stack_alloc_non_zeroed(s, size, alignment, loc) if bytes != nil { zero_slice(bytes) } return bytes, err } -stack_allocator_free :: proc( +stack_free :: proc( s: ^Stack, old_memory: rawptr, loc := #caller_location, @@ -475,7 +478,7 @@ stack_allocator_free :: proc( return nil } -stack_allocator_free_all :: proc(s: ^Stack) { +stack_free_all :: proc(s: ^Stack, loc := #caller_location) { if s.data == nil { panic("Stack free all on an uninitialized stack allocator", loc) } @@ -483,7 +486,7 @@ stack_allocator_free_all :: proc(s: ^Stack) { s.curr_offset = 0 } -stack_allocator_resize_non_zeroed :: proc( +stack_resize_non_zeroed :: proc( s: ^Stack, old_memory: rawptr, old_size: int, @@ -495,7 +498,7 @@ stack_allocator_resize_non_zeroed :: proc( panic("Stack free all on an uninitialized stack allocator", loc) } if old_memory == nil { - return stack_allocator_alloc_non_zeroed(s, size, alignment, loc) + return stack_alloc_non_zeroed(s, size, alignment, loc) } if size == 0 { return nil, nil @@ -516,7 +519,7 @@ stack_allocator_resize_non_zeroed :: proc( header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header)) old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data))) if old_offset != header.prev_offset { - data, err := stack_allocator_alloc_non_zeroed(s, size, alignment, loc) + 
data, err := stack_alloc_non_zeroed(s, size, alignment, loc) if err == nil { runtime.copy(data, byte_slice(old_memory, old_size)) } @@ -532,7 +535,7 @@ stack_allocator_resize_non_zeroed :: proc( return byte_slice(old_memory, size), nil } -stack_allocator_resize :: proc( +stack_resize :: proc( s: ^Stack, old_memory: rawptr, old_size: int, @@ -540,7 +543,7 @@ stack_allocator_resize :: proc( alignment := DEFAULT_ALIGNMENT, loc := #caller_location, ) -> ([]byte, Allocator_Error) { - bytes, err := stack_allocator_alloc_non_zeroed(s, size, alignment, loc) + bytes, err := stack_alloc_non_zeroed(s, size, alignment, loc) if bytes != nil { if old_memory == nil { zero_slice(bytes) @@ -558,7 +561,7 @@ stack_allocator_proc :: proc( alignment: int, old_memory: rawptr, old_size: int, - location := #caller_location, + loc := #caller_location, ) -> ([]byte, Allocator_Error) { s := cast(^Stack)allocator_data if s.data == nil { @@ -566,17 +569,17 @@ stack_allocator_proc :: proc( } switch mode { case .Alloc: - return stack_allocator_alloc(s, size, alignment, loc) + return stack_alloc(s, size, alignment, loc) case .Alloc_Non_Zeroed: - return stack_allocator_alloc_non_zeroed(s, size, alignment, loc) + return stack_alloc_non_zeroed(s, size, alignment, loc) case .Free: - return nil, stack_allocator_free(s, old_memory, loc) + return nil, stack_free(s, old_memory, loc) case .Free_All: - stack_allocator_free_all(s) + stack_free_all(s, loc) case .Resize: - return stack_allocator_resize(s, old_memory, old_size, size, alignment, loc) + return stack_resize(s, old_memory, old_size, size, alignment, loc) case .Resize_Non_Zeroed: - return stack_allocator_resize_non_zeroed(s, old_memory, old_size, size, alignment, loc) + return stack_resize_non_zeroed(s, old_memory, old_size, size, alignment, loc) case .Query_Features: set := (^Allocator_Mode_Set)(old_memory) if set != nil { From 4843db0960abb49de9357d048083a46bb603b2ae Mon Sep 17 00:00:00 2001 From: flysand7 Date: Sat, 7 Sep 2024 12:23:55 +1100 
Subject: [PATCH 07/35] [mem]: API for using small stack allocator directly --- core/mem/allocators.odin | 356 ++++++++++++++++++++++----------------- 1 file changed, 203 insertions(+), 153 deletions(-) diff --git a/core/mem/allocators.odin b/core/mem/allocators.odin index 97c6d03c93f..a9a362014d0 100644 --- a/core/mem/allocators.odin +++ b/core/mem/allocators.odin @@ -33,6 +33,14 @@ Arena_Temp_Memory :: struct { prev_offset: int, } +@(require_results) +arena_allocator :: proc(arena: ^Arena) -> Allocator { + return Allocator{ + procedure = arena_allocator_proc, + data = arena, + } +} + arena_init :: proc(a: ^Arena, data: []byte) { a.data = data a.offset = 0 @@ -48,14 +56,6 @@ init_arena :: proc(a: ^Arena, data: []byte) { a.temp_count = 0 } -@(require_results) -arena_allocator :: proc(arena: ^Arena) -> Allocator { - return Allocator{ - procedure = arena_allocator_proc, - data = arena, - } -} - arena_alloc :: proc(a: ^Arena, size: int, alignment := DEFAULT_ALIGNMENT) -> ([]byte, Allocator_Error) { bytes, err := arena_alloc_non_zeroed(a, size, alignment) if bytes != nil { @@ -144,6 +144,14 @@ Scratch :: struct { leaked_allocations: [dynamic][]byte, } +@(require_results) +scratch_allocator :: proc(allocator: ^Scratch) -> Allocator { + return Allocator{ + procedure = scratch_allocator_proc, + data = allocator, + } +} + scratch_init :: proc(s: ^Scratch, size: int, backup_allocator := context.allocator) -> Allocator_Error { s.data = make_aligned([]byte, size, 2*align_of(rawptr), backup_allocator) or_return s.curr_offset = 0 @@ -275,6 +283,21 @@ scratch_free_all :: proc(s: ^Scratch, loc := #caller_location) { clear(&s.leaked_allocations) } +scratch_resize :: proc( + s: ^Scratch, + old_memory: rawptr, + old_size: int, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location +) -> ([]byte, Allocator_Error) { + bytes, err := scratch_resize_non_zeroed(s, old_memory, old_size, size, alignment, loc) + if bytes != nil && size > old_size { + zero_slice(bytes[size:]) 
+ } + return bytes, err +} + scratch_resize_non_zeroed :: proc( s: ^Scratch, old_memory: rawptr, @@ -308,21 +331,6 @@ scratch_resize_non_zeroed :: proc( return data, err } -scratch_resize :: proc( - s: ^Scratch, - old_memory: rawptr, - old_size: int, - size: int, - alignment := DEFAULT_ALIGNMENT, - loc := #caller_location -) -> ([]byte, Allocator_Error) { - bytes, err := scratch_resize_non_zeroed(s, old_memory, old_size, size, alignment, loc) - if bytes != nil && size > old_size { - zero_slice(bytes[size:]) - } - return bytes, err -} - scratch_allocator_proc :: proc( allocator_data: rawptr, mode: Allocator_Mode, @@ -358,14 +366,6 @@ scratch_allocator_proc :: proc( return nil, nil } -@(require_results) -scratch_allocator :: proc(allocator: ^Scratch) -> Allocator { - return Allocator{ - procedure = scratch_allocator_proc, - data = allocator, - } -} - // Stack is a stack-like allocator which has a strict memory freeing order @@ -381,6 +381,14 @@ Stack_Allocation_Header :: struct { padding: int, } +@(require_results) +stack_allocator :: proc(stack: ^Stack) -> Allocator { + return Allocator{ + procedure = stack_allocator_proc, + data = stack, + } +} + stack_init :: proc(s: ^Stack, data: []byte) { s.data = data s.prev_offset = 0 @@ -396,12 +404,17 @@ init_stack :: proc(s: ^Stack, data: []byte) { s.peak_used = 0 } -@(require_results) -stack_allocator :: proc(stack: ^Stack) -> Allocator { - return Allocator{ - procedure = stack_allocator_proc, - data = stack, +stack_alloc :: proc( + s: ^Stack, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location +) -> ([]byte, Allocator_Error) { + bytes, err := stack_alloc_non_zeroed(s, size, alignment, loc) + if bytes != nil { + zero_slice(bytes) } + return bytes, err } stack_alloc_non_zeroed :: proc( @@ -433,19 +446,6 @@ stack_alloc_non_zeroed :: proc( return byte_slice(rawptr(next_addr), size), nil } -stack_alloc :: proc( - s: ^Stack, - size: int, - alignment := DEFAULT_ALIGNMENT, - loc := #caller_location -) -> 
([]byte, Allocator_Error) { - bytes, err := stack_alloc_non_zeroed(s, size, alignment, loc) - if bytes != nil { - zero_slice(bytes) - } - return bytes, err -} - stack_free :: proc( s: ^Stack, old_memory: rawptr, @@ -486,6 +486,25 @@ stack_free_all :: proc(s: ^Stack, loc := #caller_location) { s.curr_offset = 0 } +stack_resize :: proc( + s: ^Stack, + old_memory: rawptr, + old_size: int, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { + bytes, err := stack_alloc_non_zeroed(s, size, alignment, loc) + if bytes != nil { + if old_memory == nil { + zero_slice(bytes) + } else if size > old_size { + zero_slice(bytes[old_size:]) + } + } + return bytes, err +} + stack_resize_non_zeroed :: proc( s: ^Stack, old_memory: rawptr, @@ -535,25 +554,6 @@ stack_resize_non_zeroed :: proc( return byte_slice(old_memory, size), nil } -stack_resize :: proc( - s: ^Stack, - old_memory: rawptr, - old_size: int, - size: int, - alignment := DEFAULT_ALIGNMENT, - loc := #caller_location, -) -> ([]byte, Allocator_Error) { - bytes, err := stack_alloc_non_zeroed(s, size, alignment, loc) - if bytes != nil { - if old_memory == nil { - zero_slice(bytes) - } else if size > old_size { - zero_slice(bytes[old_size:]) - } - } - return bytes, err -} - stack_allocator_proc :: proc( allocator_data: rawptr, mode: Allocator_Mode, @@ -626,118 +626,168 @@ small_stack_allocator :: proc(stack: ^Small_Stack) -> Allocator { } } -small_stack_allocator_proc :: proc( - allocator_data: rawptr, - mode: Allocator_Mode, - size, alignment: int, - old_memory: rawptr, - old_size: int, - location := #caller_location, +small_stack_alloc :: proc( + s: ^Small_Stack, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location, ) -> ([]byte, Allocator_Error) { - s := cast(^Small_Stack)allocator_data + bytes, err := small_stack_alloc_non_zeroed(s, size, alignment, loc) + if bytes != nil { + zero_slice(bytes) + } + return bytes, err +} +small_stack_alloc_non_zeroed 
:: proc( + s: ^Small_Stack, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { if s.data == nil { return nil, .Invalid_Argument } + alignment := alignment + alignment := clamp(alignment, 1, 8*size_of(Stack_Allocation_Header{}.padding)/2) + curr_addr := uintptr(raw_data(s.data)) + uintptr(s.offset) + padding := calc_padding_with_header(curr_addr, uintptr(alignment), size_of(Small_Stack_Allocation_Header)) + if s.offset + padding + size > len(s.data) { + return nil, .Out_Of_Memory + } + s.offset += padding + next_addr := curr_addr + uintptr(padding) + header := (^Small_Stack_Allocation_Header)(next_addr - size_of(Small_Stack_Allocation_Header)) + header.padding = auto_cast padding + s.offset += size + s.peak_used = max(s.peak_used, s.offset) + return byte_slice(rawptr(next_addr), size), nil +} + +small_stack_free :: proc( + s: ^Small_Stack, + old_memory: rawptr, + loc := #caller_location, +) -> Allocator_Error { + if old_memory == nil { + return nil, nil + } + start := uintptr(raw_data(s.data)) + end := start + uintptr(len(s.data)) + curr_addr := uintptr(old_memory) + if !(start <= curr_addr && curr_addr < end) { + // panic("Out of bounds memory address passed to stack allocator (free)"); + return nil, .Invalid_Pointer + } + if curr_addr >= start+uintptr(s.offset) { + // NOTE(bill): Allow double frees + return nil, nil + } + header := (^Small_Stack_Allocation_Header)(curr_addr - size_of(Small_Stack_Allocation_Header)) + old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data))) + s.offset = old_offset +} - align := clamp(alignment, 1, 8*size_of(Stack_Allocation_Header{}.padding)/2) +small_stack_free_all :: proc(s: ^Small_Stack) { + s.offset = 0 +} - raw_alloc :: proc(s: ^Small_Stack, size, alignment: int, zero_memory: bool) -> ([]byte, Allocator_Error) { - curr_addr := uintptr(raw_data(s.data)) + uintptr(s.offset) - padding := calc_padding_with_header(curr_addr, uintptr(alignment), 
size_of(Small_Stack_Allocation_Header)) - if s.offset + padding + size > len(s.data) { - return nil, .Out_Of_Memory +small_stack_resize :: proc( + s: ^Small_Stack, + old_memory: rawptr, + old_size: int, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { + bytes, err := small_stack_resize_non_zeroed(s, old_memory, old_size, size, alignment, loc) + if bytes != nil { + if old_memory == nil { + zero_slice(bytes) + } else if size > old_size { + zero_slice(bytes[old_size:]) } - s.offset += padding - - next_addr := curr_addr + uintptr(padding) - header := (^Small_Stack_Allocation_Header)(next_addr - size_of(Small_Stack_Allocation_Header)) - header.padding = auto_cast padding + } + return bytes, err +} - s.offset += size +small_stack_resize_non_zeroed :: proc( + s: ^Small_Stack, + old_memory: rawptr, + old_size: int, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { + if old_memory == nil { + return small_stack_alloc_non_zeroed(s, size, align, loc) + } + if size == 0 { + return nil, nil + } + alignment := alignment + alignment := clamp(alignment, 1, 8*size_of(Stack_Allocation_Header{}.padding)/2) + start := uintptr(raw_data(s.data)) + end := start + uintptr(len(s.data)) + curr_addr := uintptr(old_memory) + if !(start <= curr_addr && curr_addr < end) { + // panic("Out of bounds memory address passed to stack allocator (resize)"); + return nil, .Invalid_Pointer + } + if curr_addr >= start+uintptr(s.offset) { + // NOTE(bill): Treat as a double free + return nil, nil + } + if old_size == size { + return byte_slice(old_memory, size), nil + } + data, err := small_stack_alloc_non_zeroed(s, size, alignment, loc) + if err == nil { + runtime.copy(data, byte_slice(old_memory, old_size)) + } + return data, err - s.peak_used = max(s.peak_used, s.offset) +} - if zero_memory { - zero(rawptr(next_addr), size) - } - return byte_slice(rawptr(next_addr), size), nil 
+small_stack_allocator_proc :: proc( + allocator_data: rawptr, + mode: Allocator_Mode, + size, alignment: int, + old_memory: rawptr, + old_size: int, + location := #caller_location, +) -> ([]byte, Allocator_Error) { + s := cast(^Small_Stack)allocator_data + if s.data == nil { + return nil, .Invalid_Argument } - switch mode { - case .Alloc, .Alloc_Non_Zeroed: - return raw_alloc(s, size, align, mode == .Alloc) + case .Alloc: + return small_stack_alloc(s, size, alignment, loc) + case .Alloc_Non_Zeroed: + return small_stack_alloc_non_zeroed(s, size, alignment, loc) case .Free: - if old_memory == nil { - return nil, nil - } - start := uintptr(raw_data(s.data)) - end := start + uintptr(len(s.data)) - curr_addr := uintptr(old_memory) - - if !(start <= curr_addr && curr_addr < end) { - // panic("Out of bounds memory address passed to stack allocator (free)"); - return nil, .Invalid_Pointer - } - - if curr_addr >= start+uintptr(s.offset) { - // NOTE(bill): Allow double frees - return nil, nil - } - - header := (^Small_Stack_Allocation_Header)(curr_addr - size_of(Small_Stack_Allocation_Header)) - old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data))) - - s.offset = old_offset - + return nil, small_stack_free(s, old_memory, loc) case .Free_All: - s.offset = 0 - - case .Resize, .Resize_Non_Zeroed: - if old_memory == nil { - return raw_alloc(s, size, align, mode == .Resize) - } - if size == 0 { - return nil, nil - } - - start := uintptr(raw_data(s.data)) - end := start + uintptr(len(s.data)) - curr_addr := uintptr(old_memory) - if !(start <= curr_addr && curr_addr < end) { - // panic("Out of bounds memory address passed to stack allocator (resize)"); - return nil, .Invalid_Pointer - } - - if curr_addr >= start+uintptr(s.offset) { - // NOTE(bill): Treat as a double free - return nil, nil - } - - if old_size == size { - return byte_slice(old_memory, size), nil - } - - data, err := raw_alloc(s, size, align, mode == .Resize) - if err == nil { - 
runtime.copy(data, byte_slice(old_memory, old_size)) - } - return data, err - + small_stack_free_all(s) + case .Resize: + return small_stack_resize(s, old_memory, old_size, size, alignment, loc) + case .Resize_Non_Zeroed: + return small_stack_resize_non_zeroed(s, old_memory, old_size, size, alignment, loc) case .Query_Features: set := (^Allocator_Mode_Set)(old_memory) if set != nil { set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features} } return nil, nil - case .Query_Info: return nil, .Mode_Not_Implemented } - return nil, nil } + + Dynamic_Pool :: struct { block_size: int, out_band_size: int, From aea3e9a585e07765a1d5c71448460665e25414e2 Mon Sep 17 00:00:00 2001 From: flysand7 Date: Sat, 7 Sep 2024 12:26:47 +1100 Subject: [PATCH 08/35] [mem]: Fix vet errors --- core/mem/allocators.odin | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/core/mem/allocators.odin b/core/mem/allocators.odin index a9a362014d0..2d9d6d11455 100644 --- a/core/mem/allocators.odin +++ b/core/mem/allocators.odin @@ -649,7 +649,7 @@ small_stack_alloc_non_zeroed :: proc( return nil, .Invalid_Argument } alignment := alignment - alignment := clamp(alignment, 1, 8*size_of(Stack_Allocation_Header{}.padding)/2) + alignment = clamp(alignment, 1, 8*size_of(Stack_Allocation_Header{}.padding)/2) curr_addr := uintptr(raw_data(s.data)) + uintptr(s.offset) padding := calc_padding_with_header(curr_addr, uintptr(alignment), size_of(Small_Stack_Allocation_Header)) if s.offset + padding + size > len(s.data) { @@ -670,22 +670,23 @@ small_stack_free :: proc( loc := #caller_location, ) -> Allocator_Error { if old_memory == nil { - return nil, nil + return nil } start := uintptr(raw_data(s.data)) end := start + uintptr(len(s.data)) curr_addr := uintptr(old_memory) if !(start <= curr_addr && curr_addr < end) { // panic("Out of bounds memory address passed to stack allocator (free)"); - return nil, .Invalid_Pointer + return 
.Invalid_Pointer } if curr_addr >= start+uintptr(s.offset) { // NOTE(bill): Allow double frees - return nil, nil + return nil } header := (^Small_Stack_Allocation_Header)(curr_addr - size_of(Small_Stack_Allocation_Header)) old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data))) s.offset = old_offset + return nil } small_stack_free_all :: proc(s: ^Small_Stack) { @@ -719,14 +720,14 @@ small_stack_resize_non_zeroed :: proc( alignment := DEFAULT_ALIGNMENT, loc := #caller_location, ) -> ([]byte, Allocator_Error) { + alignment := alignment + alignment = clamp(alignment, 1, 8*size_of(Stack_Allocation_Header{}.padding)/2) if old_memory == nil { - return small_stack_alloc_non_zeroed(s, size, align, loc) + return small_stack_alloc_non_zeroed(s, size, alignment, loc) } if size == 0 { return nil, nil } - alignment := alignment - alignment := clamp(alignment, 1, 8*size_of(Stack_Allocation_Header{}.padding)/2) start := uintptr(raw_data(s.data)) end := start + uintptr(len(s.data)) curr_addr := uintptr(old_memory) @@ -755,7 +756,7 @@ small_stack_allocator_proc :: proc( size, alignment: int, old_memory: rawptr, old_size: int, - location := #caller_location, + loc := #caller_location, ) -> ([]byte, Allocator_Error) { s := cast(^Small_Stack)allocator_data if s.data == nil { From f8641ddd1b096663ed45f9179014ff5201b65225 Mon Sep 17 00:00:00 2001 From: flysand7 Date: Sat, 7 Sep 2024 12:33:12 +1100 Subject: [PATCH 09/35] [mem]: Rename dynamic pool to dynamic arena --- core/mem/allocators.odin | 63 +++++++++++++++++++++++----------------- 1 file changed, 37 insertions(+), 26 deletions(-) diff --git a/core/mem/allocators.odin b/core/mem/allocators.odin index 2d9d6d11455..7c33d1c9ff0 100644 --- a/core/mem/allocators.odin +++ b/core/mem/allocators.odin @@ -788,8 +788,19 @@ small_stack_allocator_proc :: proc( } - -Dynamic_Pool :: struct { +/* old stuff */ +Dynamic_Pool :: Dynamic_Arena +DYNAMIC_POOL_BLOCK_SIZE_DEFAULT :: DYNAMIC_ARENA_BLOCK_SIZE_DEFAULT 
+DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT :: DYNAMIC_ARENA_OUT_OF_BAND_SIZE_DEFAULT +dynamic_pool_allocator_proc :: dynamic_arena_allocator_proc +dynamic_pool_free_all :: dynamic_arena_free_all +dynamic_pool_reset :: dynamic_arena_reset +dynamic_pool_alloc_bytes :: dynamic_arena_alloc_bytes +dynamic_pool_alloc :: dynamic_arena_alloc +dynamic_pool_init :: dynamic_arena_init +dynamic_pool_allocator :: dynamic_arena_allocator + +Dynamic_Arena :: struct { block_size: int, out_band_size: int, alignment: int, @@ -805,10 +816,10 @@ Dynamic_Pool :: struct { block_allocator: Allocator, } -DYNAMIC_POOL_BLOCK_SIZE_DEFAULT :: 65536 -DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT :: 6554 +DYNAMIC_ARENA_BLOCK_SIZE_DEFAULT :: 65536 +DYNAMIC_ARENA_OUT_OF_BAND_SIZE_DEFAULT :: 6554 -dynamic_pool_allocator_proc :: proc( +dynamic_arena_allocator_proc :: proc( allocator_data: rawptr, mode: Allocator_Mode, size: int, @@ -817,21 +828,21 @@ dynamic_pool_allocator_proc :: proc( old_size: int, loc := #caller_location, ) -> ([]byte, Allocator_Error) { - pool := (^Dynamic_Pool)(allocator_data) + pool := (^Dynamic_Arena)(allocator_data) switch mode { case .Alloc, .Alloc_Non_Zeroed: - return dynamic_pool_alloc_bytes(pool, size) + return dynamic_arena_alloc_bytes(pool, size) case .Free: return nil, .Mode_Not_Implemented case .Free_All: - dynamic_pool_free_all(pool) + dynamic_arena_free_all(pool) return nil, nil case .Resize, .Resize_Non_Zeroed: if old_size >= size { return byte_slice(old_memory, size), nil } - data, err := dynamic_pool_alloc_bytes(pool, size) + data, err := dynamic_arena_alloc_bytes(pool, size) if err == nil { runtime.copy(data, byte_slice(old_memory, old_size)) } @@ -856,21 +867,20 @@ dynamic_pool_allocator_proc :: proc( return nil, nil } - @(require_results) -dynamic_pool_allocator :: proc(pool: ^Dynamic_Pool) -> Allocator { +dynamic_arena_allocator :: proc(pool: ^Dynamic_Arena) -> Allocator { return Allocator{ - procedure = dynamic_pool_allocator_proc, + procedure = 
dynamic_arena_allocator_proc, data = pool, } } -dynamic_pool_init :: proc( - pool: ^Dynamic_Pool, +dynamic_arena_init :: proc( + pool: ^Dynamic_Arena, block_allocator := context.allocator, array_allocator := context.allocator, - block_size := DYNAMIC_POOL_BLOCK_SIZE_DEFAULT, - out_band_size := DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT, + block_size := DYNAMIC_ARENA_BLOCK_SIZE_DEFAULT, + out_band_size := DYNAMIC_ARENA_OUT_OF_BAND_SIZE_DEFAULT, alignment := 8, ) { pool.block_size = block_size @@ -882,8 +892,8 @@ dynamic_pool_init :: proc( pool.used_blocks.allocator = array_allocator } -dynamic_pool_destroy :: proc(pool: ^Dynamic_Pool) { - dynamic_pool_free_all(pool) +dynamic_arena_destroy :: proc(pool: ^Dynamic_Arena) { + dynamic_arena_free_all(pool) delete(pool.unused_blocks) delete(pool.used_blocks) delete(pool.out_band_allocations) @@ -891,14 +901,14 @@ dynamic_pool_destroy :: proc(pool: ^Dynamic_Pool) { } @(require_results) -dynamic_pool_alloc :: proc(pool: ^Dynamic_Pool, bytes: int) -> (rawptr, Allocator_Error) { - data, err := dynamic_pool_alloc_bytes(pool, bytes) +dynamic_arena_alloc :: proc(pool: ^Dynamic_Arena, bytes: int) -> (rawptr, Allocator_Error) { + data, err := dynamic_arena_alloc_bytes(pool, bytes) return raw_data(data), err } @(require_results) -dynamic_pool_alloc_bytes :: proc(p: ^Dynamic_Pool, bytes: int) -> ([]byte, Allocator_Error) { - cycle_new_block :: proc(p: ^Dynamic_Pool) -> (err: Allocator_Error) { +dynamic_arena_alloc_bytes :: proc(p: ^Dynamic_Arena, bytes: int) -> ([]byte, Allocator_Error) { + cycle_new_block :: proc(p: ^Dynamic_Arena) -> (err: Allocator_Error) { if p.block_allocator.procedure == nil { panic("You must call pool_init on a Pool before using it") } @@ -960,8 +970,7 @@ dynamic_pool_alloc_bytes :: proc(p: ^Dynamic_Pool, bytes: int) -> ([]byte, Alloc return ([^]byte)(memory)[:bytes], nil } - -dynamic_pool_reset :: proc(p: ^Dynamic_Pool) { +dynamic_arena_reset :: proc(p: ^Dynamic_Arena) { if p.current_block != nil { 
append(&p.unused_blocks, p.current_block) p.current_block = nil @@ -980,8 +989,8 @@ dynamic_pool_reset :: proc(p: ^Dynamic_Pool) { p.bytes_left = 0 // Make new allocations call `cycle_new_block` again. } -dynamic_pool_free_all :: proc(p: ^Dynamic_Pool) { - dynamic_pool_reset(p) +dynamic_arena_free_all :: proc(p: ^Dynamic_Arena) { + dynamic_arena_reset(p) for block in p.unused_blocks { free(block, p.block_allocator) @@ -989,6 +998,8 @@ dynamic_pool_free_all :: proc(p: ^Dynamic_Pool) { clear(&p.unused_blocks) } + + panic_allocator_proc :: proc( allocator_data: rawptr, mode: Allocator_Mode, From 03f6b9bbf6bdd9ccf2f566a2da108d1f8b3a38e1 Mon Sep 17 00:00:00 2001 From: flysand7 Date: Sat, 7 Sep 2024 12:59:19 +1100 Subject: [PATCH 10/35] [mem]: Add alloc_non_zeroed variant to dynamic pool --- core/mem/allocators.odin | 245 +++++++++++++++++++-------------------- 1 file changed, 122 insertions(+), 123 deletions(-) diff --git a/core/mem/allocators.odin b/core/mem/allocators.odin index 7c33d1c9ff0..4c6ab09b144 100644 --- a/core/mem/allocators.odin +++ b/core/mem/allocators.odin @@ -795,93 +795,35 @@ DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT :: DYNAMIC_ARENA_OUT_OF_BAND_SIZE_DEFAULT dynamic_pool_allocator_proc :: dynamic_arena_allocator_proc dynamic_pool_free_all :: dynamic_arena_free_all dynamic_pool_reset :: dynamic_arena_reset -dynamic_pool_alloc_bytes :: dynamic_arena_alloc_bytes -dynamic_pool_alloc :: dynamic_arena_alloc +dynamic_pool_alloc_bytes :: dynamic_arena_alloc +dynamic_pool_alloc :: _dynamic_arena_alloc_ptr dynamic_pool_init :: dynamic_arena_init dynamic_pool_allocator :: dynamic_arena_allocator +dynamic_pool_destroy :: dynamic_arena_destroy + +DYNAMIC_ARENA_BLOCK_SIZE_DEFAULT :: 65536 +DYNAMIC_ARENA_OUT_OF_BAND_SIZE_DEFAULT :: 6554 Dynamic_Arena :: struct { - block_size: int, + block_size: int, out_band_size: int, - alignment: int, - - unused_blocks: [dynamic]rawptr, - used_blocks: [dynamic]rawptr, + alignment: int, + unused_blocks: [dynamic]rawptr, + used_blocks: 
[dynamic]rawptr, out_band_allocations: [dynamic]rawptr, - current_block: rawptr, - current_pos: rawptr, - bytes_left: int, - + current_pos: rawptr, + bytes_left: int, block_allocator: Allocator, } -DYNAMIC_ARENA_BLOCK_SIZE_DEFAULT :: 65536 -DYNAMIC_ARENA_OUT_OF_BAND_SIZE_DEFAULT :: 6554 - -dynamic_arena_allocator_proc :: proc( - allocator_data: rawptr, - mode: Allocator_Mode, - size: int, - alignment: int, - old_memory: rawptr, - old_size: int, - loc := #caller_location, -) -> ([]byte, Allocator_Error) { - pool := (^Dynamic_Arena)(allocator_data) - - switch mode { - case .Alloc, .Alloc_Non_Zeroed: - return dynamic_arena_alloc_bytes(pool, size) - case .Free: - return nil, .Mode_Not_Implemented - case .Free_All: - dynamic_arena_free_all(pool) - return nil, nil - case .Resize, .Resize_Non_Zeroed: - if old_size >= size { - return byte_slice(old_memory, size), nil - } - data, err := dynamic_arena_alloc_bytes(pool, size) - if err == nil { - runtime.copy(data, byte_slice(old_memory, old_size)) - } - return data, err - - case .Query_Features: - set := (^Allocator_Mode_Set)(old_memory) - if set != nil { - set^ = {.Alloc, .Alloc_Non_Zeroed, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features, .Query_Info} - } - return nil, nil - - case .Query_Info: - info := (^Allocator_Query_Info)(old_memory) - if info != nil && info.pointer != nil { - info.size = pool.block_size - info.alignment = pool.alignment - return byte_slice(info, size_of(info^)), nil - } - return nil, nil - } - return nil, nil -} - -@(require_results) -dynamic_arena_allocator :: proc(pool: ^Dynamic_Arena) -> Allocator { - return Allocator{ - procedure = dynamic_arena_allocator_proc, - data = pool, - } -} - dynamic_arena_init :: proc( pool: ^Dynamic_Arena, block_allocator := context.allocator, array_allocator := context.allocator, block_size := DYNAMIC_ARENA_BLOCK_SIZE_DEFAULT, out_band_size := DYNAMIC_ARENA_OUT_OF_BAND_SIZE_DEFAULT, - alignment := 8, + alignment := DEFAULT_ALIGNMENT, ) { pool.block_size = 
block_size pool.out_band_size = out_band_size @@ -892,6 +834,14 @@ dynamic_arena_init :: proc( pool.used_blocks.allocator = array_allocator } +@(require_results) +dynamic_arena_allocator :: proc(pool: ^Dynamic_Arena) -> Allocator { + return Allocator{ + procedure = dynamic_arena_allocator_proc, + data = pool, + } +} + dynamic_arena_destroy :: proc(pool: ^Dynamic_Arena) { dynamic_arena_free_all(pool) delete(pool.unused_blocks) @@ -900,62 +850,66 @@ dynamic_arena_destroy :: proc(pool: ^Dynamic_Arena) { zero(pool, size_of(pool^)) } -@(require_results) -dynamic_arena_alloc :: proc(pool: ^Dynamic_Arena, bytes: int) -> (rawptr, Allocator_Error) { - data, err := dynamic_arena_alloc_bytes(pool, bytes) +@(private="file") +_dynamic_arena_cycle_new_block :: proc(p: ^Dynamic_Arena, loc := #caller_location) -> (err: Allocator_Error) { + if p.block_allocator.procedure == nil { + panic("You must call pool_init on a Pool before using it", loc) + } + if p.current_block != nil { + append(&p.used_blocks, p.current_block, loc=loc) + } + new_block: rawptr + if len(p.unused_blocks) > 0 { + new_block = pop(&p.unused_blocks) + } else { + data: []byte + data, err = p.block_allocator.procedure( + p.block_allocator.data, + Allocator_Mode.Alloc, + p.block_size, + p.alignment, + nil, + 0, + ) + new_block = raw_data(data) + } + p.bytes_left = p.block_size + p.current_pos = new_block + p.current_block = new_block + return +} + +@(private, require_results) +_dynamic_arena_alloc_ptr :: proc(pool: ^Dynamic_Arena, size: int, loc := #caller_location) -> (rawptr, Allocator_Error) { + data, err := dynamic_arena_alloc(pool, size, loc) return raw_data(data), err } @(require_results) -dynamic_arena_alloc_bytes :: proc(p: ^Dynamic_Arena, bytes: int) -> ([]byte, Allocator_Error) { - cycle_new_block :: proc(p: ^Dynamic_Arena) -> (err: Allocator_Error) { - if p.block_allocator.procedure == nil { - panic("You must call pool_init on a Pool before using it") - } - - if p.current_block != nil { - 
append(&p.used_blocks, p.current_block) - } - - new_block: rawptr - if len(p.unused_blocks) > 0 { - new_block = pop(&p.unused_blocks) - } else { - data: []byte - data, err = p.block_allocator.procedure( - p.block_allocator.data, - Allocator_Mode.Alloc, - p.block_size, - p.alignment, - nil, - 0, - ) - new_block = raw_data(data) - } - - p.bytes_left = p.block_size - p.current_pos = new_block - p.current_block = new_block - return +dynamic_arena_alloc :: proc(p: ^Dynamic_Arena, size: int, loc := #caller_location) -> ([]byte, Allocator_Error) { + bytes, err := dynamic_arena_alloc_non_zeroed(p, size, loc) + if bytes != nil { + zero_slice(bytes) } + return bytes, err +} - n := align_formula(bytes, p.alignment) +@(require_results) +dynamic_arena_alloc_non_zeroed :: proc(p: ^Dynamic_Arena, size: int, loc := #caller_location) -> ([]byte, Allocator_Error) { + n := align_formula(size, p.alignment) if n > p.block_size { return nil, .Invalid_Argument } if n >= p.out_band_size { - assert(p.block_allocator.procedure != nil) - memory, err := p.block_allocator.procedure(p.block_allocator.data, Allocator_Mode.Alloc, - p.block_size, p.alignment, - nil, 0) + assert(p.block_allocator.procedure != nil, "Backing block allocator must be initialized", loc=loc) + memory, err := alloc_bytes_non_zeroed(p.block_size, p.alignment, p.block_allocator, loc) if memory != nil { - append(&p.out_band_allocations, raw_data(memory)) + append(&p.out_band_allocations, raw_data(memory), loc = loc) } return memory, err } - if p.bytes_left < n { - err := cycle_new_block(p) + err := _dynamic_arena_cycle_new_block(p, loc) if err != nil { return nil, err } @@ -963,41 +917,86 @@ dynamic_arena_alloc_bytes :: proc(p: ^Dynamic_Arena, bytes: int) -> ([]byte, All return nil, .Out_Of_Memory } } - memory := p.current_pos p.current_pos = ([^]byte)(p.current_pos)[n:] p.bytes_left -= n - return ([^]byte)(memory)[:bytes], nil + return ([^]byte)(memory)[:size], nil } -dynamic_arena_reset :: proc(p: ^Dynamic_Arena) { 
+dynamic_arena_reset :: proc(p: ^Dynamic_Arena, loc := #caller_location) { if p.current_block != nil { - append(&p.unused_blocks, p.current_block) + append(&p.unused_blocks, p.current_block, loc=loc) p.current_block = nil } - for block in p.used_blocks { - append(&p.unused_blocks, block) + append(&p.unused_blocks, block, loc=loc) } clear(&p.used_blocks) - for a in p.out_band_allocations { - free(a, p.block_allocator) + free(a, p.block_allocator, loc=loc) } clear(&p.out_band_allocations) - - p.bytes_left = 0 // Make new allocations call `cycle_new_block` again. + p.bytes_left = 0 // Make new allocations call `_dynamic_arena_cycle_new_block` again. } dynamic_arena_free_all :: proc(p: ^Dynamic_Arena) { dynamic_arena_reset(p) - for block in p.unused_blocks { free(block, p.block_allocator) } clear(&p.unused_blocks) } +dynamic_arena_allocator_proc :: proc( + allocator_data: rawptr, + mode: Allocator_Mode, + size: int, + alignment: int, + old_memory: rawptr, + old_size: int, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { + pool := (^Dynamic_Arena)(allocator_data) + + switch mode { + case .Alloc: + return dynamic_arena_alloc(pool, size, loc) + case .Alloc_Non_Zeroed: + return dynamic_arena_alloc_non_zeroed(pool, size, loc) + case .Free: + return nil, .Mode_Not_Implemented + case .Free_All: + dynamic_arena_free_all(pool) + return nil, nil + case .Resize, .Resize_Non_Zeroed: + if old_size >= size { + return byte_slice(old_memory, size), nil + } + data, err := dynamic_arena_alloc(pool, size) + if err == nil { + runtime.copy(data, byte_slice(old_memory, old_size)) + } + return data, err + + case .Query_Features: + set := (^Allocator_Mode_Set)(old_memory) + if set != nil { + set^ = {.Alloc, .Alloc_Non_Zeroed, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features, .Query_Info} + } + return nil, nil + + case .Query_Info: + info := (^Allocator_Query_Info)(old_memory) + if info != nil && info.pointer != nil { + info.size = pool.block_size + info.alignment = 
pool.alignment + return byte_slice(info, size_of(info^)), nil + } + return nil, nil + } + return nil, nil +} + panic_allocator_proc :: proc( From b350a35b7738c6f7ba7ee65dd403b86de32213c5 Mon Sep 17 00:00:00 2001 From: flysand7 Date: Sat, 7 Sep 2024 13:10:29 +1100 Subject: [PATCH 11/35] [mem]: Add resize_non_zeroed variant to dynamic arena, and rename pool to arena --- core/mem/allocators.odin | 126 +++++++++++++++++++++++---------------- 1 file changed, 76 insertions(+), 50 deletions(-) diff --git a/core/mem/allocators.odin b/core/mem/allocators.odin index 4c6ab09b144..d7e3cfbfdd3 100644 --- a/core/mem/allocators.odin +++ b/core/mem/allocators.odin @@ -880,14 +880,14 @@ _dynamic_arena_cycle_new_block :: proc(p: ^Dynamic_Arena, loc := #caller_locatio } @(private, require_results) -_dynamic_arena_alloc_ptr :: proc(pool: ^Dynamic_Arena, size: int, loc := #caller_location) -> (rawptr, Allocator_Error) { - data, err := dynamic_arena_alloc(pool, size, loc) +_dynamic_arena_alloc_ptr :: proc(a: ^Dynamic_Arena, size: int, loc := #caller_location) -> (rawptr, Allocator_Error) { + data, err := dynamic_arena_alloc(a, size, loc) return raw_data(data), err } @(require_results) -dynamic_arena_alloc :: proc(p: ^Dynamic_Arena, size: int, loc := #caller_location) -> ([]byte, Allocator_Error) { - bytes, err := dynamic_arena_alloc_non_zeroed(p, size, loc) +dynamic_arena_alloc :: proc(a: ^Dynamic_Arena, size: int, loc := #caller_location) -> ([]byte, Allocator_Error) { + bytes, err := dynamic_arena_alloc_non_zeroed(a, size, loc) if bytes != nil { zero_slice(bytes) } @@ -895,56 +895,91 @@ dynamic_arena_alloc :: proc(p: ^Dynamic_Arena, size: int, loc := #caller_locatio } @(require_results) -dynamic_arena_alloc_non_zeroed :: proc(p: ^Dynamic_Arena, size: int, loc := #caller_location) -> ([]byte, Allocator_Error) { - n := align_formula(size, p.alignment) - if n > p.block_size { +dynamic_arena_alloc_non_zeroed :: proc(a: ^Dynamic_Arena, size: int, loc := #caller_location) -> ([]byte, 
Allocator_Error) { + n := align_formula(size, a.alignment) + if n > a.block_size { return nil, .Invalid_Argument } - if n >= p.out_band_size { - assert(p.block_allocator.procedure != nil, "Backing block allocator must be initialized", loc=loc) - memory, err := alloc_bytes_non_zeroed(p.block_size, p.alignment, p.block_allocator, loc) + if n >= a.out_band_size { + assert(a.block_allocator.procedure != nil, "Backing block allocator must be initialized", loc=loc) + memory, err := alloc_bytes_non_zeroed(a.block_size, a.alignment, a.block_allocator, loc) if memory != nil { - append(&p.out_band_allocations, raw_data(memory), loc = loc) + append(&a.out_band_allocations, raw_data(memory), loc = loc) } return memory, err } - if p.bytes_left < n { - err := _dynamic_arena_cycle_new_block(p, loc) + if a.bytes_left < n { + err := _dynamic_arena_cycle_new_block(a, loc) if err != nil { return nil, err } - if p.current_block == nil { + if a.current_block == nil { return nil, .Out_Of_Memory } } - memory := p.current_pos - p.current_pos = ([^]byte)(p.current_pos)[n:] - p.bytes_left -= n + memory := a.current_pos + a.current_pos = ([^]byte)(a.current_pos)[n:] + a.bytes_left -= n return ([^]byte)(memory)[:size], nil } -dynamic_arena_reset :: proc(p: ^Dynamic_Arena, loc := #caller_location) { - if p.current_block != nil { - append(&p.unused_blocks, p.current_block, loc=loc) - p.current_block = nil +dynamic_arena_reset :: proc(a: ^Dynamic_Arena, loc := #caller_location) { + if a.current_block != nil { + append(&a.unused_blocks, a.current_block, loc=loc) + a.current_block = nil + } + for block in a.used_blocks { + append(&a.unused_blocks, block, loc=loc) + } + clear(&a.used_blocks) + for a in a.out_band_allocations { + free(a, a.block_allocator, loc=loc) } - for block in p.used_blocks { - append(&p.unused_blocks, block, loc=loc) + clear(&a.out_band_allocations) + a.bytes_left = 0 // Make new allocations call `_dynamic_arena_cycle_new_block` again. 
+} + +dynamic_arena_free_all :: proc(a: ^Dynamic_Arena, loc := #caller_location) { + dynamic_arena_reset(a) + for block in a.unused_blocks { + free(block, a.block_allocator, loc) } - clear(&p.used_blocks) - for a in p.out_band_allocations { - free(a, p.block_allocator, loc=loc) + clear(&a.unused_blocks) +} + +dynamic_arena_resize :: proc( + a: ^Dynamic_Arena, + old_memory: rawptr, + old_size: int, + size: int, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { + bytes, err := dynamic_arena_resize_non_zeroed(a, old_memory, old_size, size, loc) + if bytes != nil { + if old_memory == nil { + zero_slice(bytes) + } else if size > old_size { + zero_slice(bytes[old_size:]) + } } - clear(&p.out_band_allocations) - p.bytes_left = 0 // Make new allocations call `_dynamic_arena_cycle_new_block` again. + return bytes, err } -dynamic_arena_free_all :: proc(p: ^Dynamic_Arena) { - dynamic_arena_reset(p) - for block in p.unused_blocks { - free(block, p.block_allocator) +dynamic_arena_resize_non_zeroed :: proc( + a: ^Dynamic_Arena, + old_memory: rawptr, + old_size: int, + size: int, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { + if old_size >= size { + return byte_slice(old_memory, size), nil } - clear(&p.unused_blocks) + data, err := dynamic_arena_alloc_non_zeroed(a, size, loc) + if err == nil { + runtime.copy(data, byte_slice(old_memory, old_size)) + } + return data, err } dynamic_arena_allocator_proc :: proc( @@ -956,35 +991,26 @@ dynamic_arena_allocator_proc :: proc( old_size: int, loc := #caller_location, ) -> ([]byte, Allocator_Error) { - pool := (^Dynamic_Arena)(allocator_data) - + arena := (^Dynamic_Arena)(allocator_data) switch mode { case .Alloc: - return dynamic_arena_alloc(pool, size, loc) + return dynamic_arena_alloc(arena, size, loc) case .Alloc_Non_Zeroed: - return dynamic_arena_alloc_non_zeroed(pool, size, loc) + return dynamic_arena_alloc_non_zeroed(arena, size, loc) case .Free: return nil, .Mode_Not_Implemented case .Free_All: - 
dynamic_arena_free_all(pool) - return nil, nil - case .Resize, .Resize_Non_Zeroed: - if old_size >= size { - return byte_slice(old_memory, size), nil - } - data, err := dynamic_arena_alloc(pool, size) - if err == nil { - runtime.copy(data, byte_slice(old_memory, old_size)) - } - return data, err - + dynamic_arena_free_all(arena, loc) + case .Resize: + return dynamic_arena_resize(arena, old_memory, old_size, size, loc) + case .Resize_Non_Zeroed: + return dynamic_arena_resize_non_zeroed(arena, old_memory, old_size, size, loc) case .Query_Features: set := (^Allocator_Mode_Set)(old_memory) if set != nil { set^ = {.Alloc, .Alloc_Non_Zeroed, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features, .Query_Info} } return nil, nil - case .Query_Info: info := (^Allocator_Query_Info)(old_memory) if info != nil && info.pointer != nil { From 6d3cffa13c4e43400eaaa33a0c551cef5cd3e44c Mon Sep 17 00:00:00 2001 From: flysand7 Date: Sat, 7 Sep 2024 13:14:58 +1100 Subject: [PATCH 12/35] [mem]: Add @require_results to all functions returning values --- core/mem/allocators.odin | 32 +++++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/core/mem/allocators.odin b/core/mem/allocators.odin index d7e3cfbfdd3..1efc600338d 100644 --- a/core/mem/allocators.odin +++ b/core/mem/allocators.odin @@ -56,6 +56,7 @@ init_arena :: proc(a: ^Arena, data: []byte) { a.temp_count = 0 } +@(require_results) arena_alloc :: proc(a: ^Arena, size: int, alignment := DEFAULT_ALIGNMENT) -> ([]byte, Allocator_Error) { bytes, err := arena_alloc_non_zeroed(a, size, alignment) if bytes != nil { @@ -64,6 +65,7 @@ arena_alloc :: proc(a: ^Arena, size: int, alignment := DEFAULT_ALIGNMENT) -> ([] return bytes, err } +@(require_results) arena_alloc_non_zeroed :: proc(a: ^Arena, size: int, alignment := DEFAULT_ALIGNMENT) -> ([]byte, Allocator_Error) { #no_bounds_check end := &a.data[a.offset] ptr := align_forward(end, uintptr(alignment)) @@ -173,6 +175,7 @@ scratch_destroy :: proc(s: 
^Scratch) { s^ = {} } +@(require_results) scratch_alloc :: proc( s: ^Scratch, size: int, @@ -186,6 +189,7 @@ scratch_alloc :: proc( return bytes, err } +@(require_results) scratch_alloc_non_zeroed :: proc( s: ^Scratch, size: int, @@ -239,6 +243,7 @@ scratch_alloc_non_zeroed :: proc( return ptr, err } +@(require_results) scratch_free :: proc(s: ^Scratch, ptr: rawptr, loc := #caller_location) -> Allocator_Error { if s.data == nil { panic("Free on an uninitialized scratch allocator", loc) @@ -283,6 +288,7 @@ scratch_free_all :: proc(s: ^Scratch, loc := #caller_location) { clear(&s.leaked_allocations) } +@(require_results) scratch_resize :: proc( s: ^Scratch, old_memory: rawptr, @@ -298,6 +304,7 @@ scratch_resize :: proc( return bytes, err } +@(require_results) scratch_resize_non_zeroed :: proc( s: ^Scratch, old_memory: rawptr, @@ -404,6 +411,7 @@ init_stack :: proc(s: ^Stack, data: []byte) { s.peak_used = 0 } +@(require_results) stack_alloc :: proc( s: ^Stack, size: int, @@ -417,6 +425,7 @@ stack_alloc :: proc( return bytes, err } +@(require_results) stack_alloc_non_zeroed :: proc( s: ^Stack, size: int, @@ -446,6 +455,7 @@ stack_alloc_non_zeroed :: proc( return byte_slice(rawptr(next_addr), size), nil } +@(require_results) stack_free :: proc( s: ^Stack, old_memory: rawptr, @@ -486,6 +496,7 @@ stack_free_all :: proc(s: ^Stack, loc := #caller_location) { s.curr_offset = 0 } +@(require_results) stack_resize :: proc( s: ^Stack, old_memory: rawptr, @@ -505,6 +516,7 @@ stack_resize :: proc( return bytes, err } +@(require_results) stack_resize_non_zeroed :: proc( s: ^Stack, old_memory: rawptr, @@ -626,6 +638,7 @@ small_stack_allocator :: proc(stack: ^Small_Stack) -> Allocator { } } +@(require_results) small_stack_alloc :: proc( s: ^Small_Stack, size: int, @@ -639,6 +652,7 @@ small_stack_alloc :: proc( return bytes, err } +@(require_results) small_stack_alloc_non_zeroed :: proc( s: ^Small_Stack, size: int, @@ -664,6 +678,7 @@ small_stack_alloc_non_zeroed :: proc( return 
byte_slice(rawptr(next_addr), size), nil } +@(require_results) small_stack_free :: proc( s: ^Small_Stack, old_memory: rawptr, @@ -693,6 +708,7 @@ small_stack_free_all :: proc(s: ^Small_Stack) { s.offset = 0 } +@(require_results) small_stack_resize :: proc( s: ^Small_Stack, old_memory: rawptr, @@ -712,6 +728,7 @@ small_stack_resize :: proc( return bytes, err } +@(require_results) small_stack_resize_non_zeroed :: proc( s: ^Small_Stack, old_memory: rawptr, @@ -932,8 +949,8 @@ dynamic_arena_reset :: proc(a: ^Dynamic_Arena, loc := #caller_location) { append(&a.unused_blocks, block, loc=loc) } clear(&a.used_blocks) - for a in a.out_band_allocations { - free(a, a.block_allocator, loc=loc) + for allocation in a.out_band_allocations { + free(allocation, a.block_allocator, loc=loc) } clear(&a.out_band_allocations) a.bytes_left = 0 // Make new allocations call `_dynamic_arena_cycle_new_block` again. @@ -947,6 +964,7 @@ dynamic_arena_free_all :: proc(a: ^Dynamic_Arena, loc := #caller_location) { clear(&a.unused_blocks) } +@(require_results) dynamic_arena_resize :: proc( a: ^Dynamic_Arena, old_memory: rawptr, @@ -965,6 +983,7 @@ dynamic_arena_resize :: proc( return bytes, err } +@(require_results) dynamic_arena_resize_non_zeroed :: proc( a: ^Dynamic_Arena, old_memory: rawptr, @@ -1014,8 +1033,8 @@ dynamic_arena_allocator_proc :: proc( case .Query_Info: info := (^Allocator_Query_Info)(old_memory) if info != nil && info.pointer != nil { - info.size = pool.block_size - info.alignment = pool.alignment + info.size = arena.block_size + info.alignment = arena.alignment return byte_slice(info, size_of(info^)), nil } return nil, nil @@ -1033,7 +1052,6 @@ panic_allocator_proc :: proc( old_size: int, loc := #caller_location, ) -> ([]byte, Allocator_Error) { - switch mode { case .Alloc: if size > 0 { @@ -1057,7 +1075,6 @@ panic_allocator_proc :: proc( } case .Free_All: panic("mem: panic allocator, .Free_All called", loc=loc) - case .Query_Features: set := (^Allocator_Mode_Set)(old_memory) 
if set != nil { @@ -1068,7 +1085,6 @@ panic_allocator_proc :: proc( case .Query_Info: panic("mem: panic allocator, .Query_Info called", loc=loc) } - return nil, nil } @@ -1080,6 +1096,8 @@ panic_allocator :: proc() -> Allocator { } } + + Buddy_Block :: struct #align(align_of(uint)) { size: uint, is_free: bool, From c0e17808d46be70b461c020c9c320349e2f99ad9 Mon Sep 17 00:00:00 2001 From: flysand7 Date: Sat, 7 Sep 2024 13:26:09 +1100 Subject: [PATCH 13/35] [mem]: Split alloc and alloc_non_zeroed for buddy allocator --- core/mem/allocators.odin | 59 ++++++++++++++-------------------------- 1 file changed, 21 insertions(+), 38 deletions(-) diff --git a/core/mem/allocators.odin b/core/mem/allocators.odin index 1efc600338d..45c80e67884 100644 --- a/core/mem/allocators.odin +++ b/core/mem/allocators.odin @@ -1133,7 +1133,6 @@ buddy_block_coalescence :: proc(head, tail: ^Buddy_Block) { // Keep looping until there are no more buddies to coalesce block := head buddy := buddy_block_next(block) - no_coalescence := true for block < tail && buddy < tail { // make sure the buddies are within the range if block.is_free && buddy.is_free && block.size == buddy.size { @@ -1156,7 +1155,6 @@ buddy_block_coalescence :: proc(head, tail: ^Buddy_Block) { } } } - if no_coalescence { return } @@ -1166,17 +1164,14 @@ buddy_block_coalescence :: proc(head, tail: ^Buddy_Block) { @(require_results) buddy_block_find_best :: proc(head, tail: ^Buddy_Block, size: uint) -> ^Buddy_Block { assert(size != 0) - best_block: ^Buddy_Block block := head // left buddy := buddy_block_next(block) // right - // The entire memory section between head and tail is free, // just call 'buddy_block_split' to get the allocation if buddy == tail && block.is_free { return buddy_block_split(block, size) } - // Find the block which is the 'best_block' to requested allocation sized for block < tail && buddy < tail { // make sure the buddies are within the range // If both buddies are free, coalesce them together @@ -1187,7 
+1182,6 @@ buddy_block_find_best :: proc(head, tail: ^Buddy_Block, size: uint) -> ^Buddy_Bl if size <= block.size && (best_block == nil || block.size <= best_block.size) { best_block = block } - block = buddy_block_next(buddy) if block < tail { // Delay the buddy block for the next iteration @@ -1195,20 +1189,16 @@ buddy_block_find_best :: proc(head, tail: ^Buddy_Block, size: uint) -> ^Buddy_Bl } continue } - - if block.is_free && size <= block.size && (best_block == nil || block.size <= best_block.size) { best_block = block } - if buddy.is_free && size <= buddy.size && (best_block == nil || buddy.size < best_block.size) { // If each buddy are the same size, then it makes more sense // to pick the buddy as it "bounces around" less best_block = buddy } - if (block.size <= buddy.size) { block = buddy_block_next(buddy) if (block < tail) { @@ -1221,12 +1211,10 @@ buddy_block_find_best :: proc(head, tail: ^Buddy_Block, size: uint) -> ^Buddy_Bl buddy = buddy_block_next(buddy) } } - if best_block != nil { // This will handle the case if the 'best_block' is also the perfect fit return buddy_block_split(best_block, size) } - // Maybe out of memory return nil } @@ -1245,26 +1233,20 @@ buddy_allocator :: proc(b: ^Buddy_Allocator) -> Allocator { } } -buddy_allocator_init :: proc(b: ^Buddy_Allocator, data: []byte, alignment: uint) { +buddy_allocator_init :: proc(b: ^Buddy_Allocator, data: []byte, alignment: uint, loc := #caller_location) { assert(data != nil) - assert(is_power_of_two(uintptr(len(data)))) - assert(is_power_of_two(uintptr(alignment))) - + assert(is_power_of_two(uintptr(len(data))), "Size of the backing buffer must be power of two", loc) + assert(is_power_of_two(uintptr(alignment)), "Alignment must be a power of two", loc) alignment := alignment if alignment < size_of(Buddy_Block) { alignment = size_of(Buddy_Block) } - ptr := raw_data(data) - assert(uintptr(ptr) % uintptr(alignment) == 0, "data is not aligned to minimum alignment") - + assert(uintptr(ptr) % 
uintptr(alignment) == 0, "data is not aligned to minimum alignment", loc) b.head = (^Buddy_Block)(ptr) - b.head.size = len(data) b.head.is_free = true - b.tail = buddy_block_next(b.head) - b.alignment = alignment } @@ -1274,19 +1256,25 @@ buddy_block_size_required :: proc(b: ^Buddy_Allocator, size: uint) -> uint { actual_size := b.alignment size += size_of(Buddy_Block) size = align_forward_uint(size, b.alignment) - for size > actual_size { actual_size <<= 1 } - return actual_size } @(require_results) -buddy_allocator_alloc :: proc(b: ^Buddy_Allocator, size: uint, zeroed: bool) -> ([]byte, Allocator_Error) { +buddy_allocator_alloc :: proc(b: ^Buddy_Allocator, size: uint) -> ([]byte, Allocator_Error) { + bytes, err := buddy_allocator_alloc_non_zeroed(b, size) + if bytes != nil { + zero_slice(bytes) + } + return bytes, err +} + +@(require_results) +buddy_allocator_alloc_non_zeroed :: proc(b: ^Buddy_Allocator, size: uint) -> ([]byte, Allocator_Error) { if size != 0 { actual_size := buddy_block_size_required(b, size) - found := buddy_block_find_best(b.head, b.tail, actual_size) if found != nil { // Try to coalesce all the free buddy blocks and then search again @@ -1297,32 +1285,28 @@ buddy_allocator_alloc :: proc(b: ^Buddy_Allocator, size: uint, zeroed: bool) -> return nil, .Out_Of_Memory } found.is_free = false - data := ([^]byte)(found)[b.alignment:][:size] - if zeroed { - zero_slice(data) - } return data, nil } return nil, nil } +@(require_results) buddy_allocator_free :: proc(b: ^Buddy_Allocator, ptr: rawptr) -> Allocator_Error { if ptr != nil { if !(b.head <= ptr && ptr <= b.tail) { return .Invalid_Pointer } - block := (^Buddy_Block)(([^]byte)(ptr)[-b.alignment:]) block.is_free = true - buddy_block_coalescence(b.head, b.tail) } return nil } buddy_allocator_proc :: proc( - allocator_data: rawptr, mode: Allocator_Mode, + allocator_data: rawptr, + mode: Allocator_Mode, size, alignment: int, old_memory: rawptr, old_size: int, @@ -1330,10 +1314,11 @@ 
buddy_allocator_proc :: proc( ) -> ([]byte, Allocator_Error) { b := (^Buddy_Allocator)(allocator_data) - switch mode { - case .Alloc, .Alloc_Non_Zeroed: - return buddy_allocator_alloc(b, uint(size), mode == .Alloc) + case .Alloc: + return buddy_allocator_alloc(b, uint(size)) + case .Alloc_Non_Zeroed: + return buddy_allocator_alloc_non_zeroed(b, uint(size)) case .Resize: return default_resize_bytes_align(byte_slice(old_memory, old_size), size, alignment, buddy_allocator(b)) case .Resize_Non_Zeroed: @@ -1341,13 +1326,11 @@ buddy_allocator_proc :: proc( case .Free: return nil, buddy_allocator_free(b, old_memory) case .Free_All: - alignment := b.alignment head := ([^]byte)(b.head) tail := ([^]byte)(b.tail) data := head[:ptr_sub(tail, head)] buddy_allocator_init(b, data, alignment) - case .Query_Features: set := (^Allocator_Mode_Set)(old_memory) if set != nil { From c0112d1c70e369dd4f4704d577c7ff6e8ef17282 Mon Sep 17 00:00:00 2001 From: flysand7 Date: Sat, 7 Sep 2024 13:27:17 +1100 Subject: [PATCH 14/35] [mem]: Add free_all for buddy allocator --- core/mem/allocators.odin | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/core/mem/allocators.odin b/core/mem/allocators.odin index 45c80e67884..5fedbd4d66d 100644 --- a/core/mem/allocators.odin +++ b/core/mem/allocators.odin @@ -1304,6 +1304,14 @@ buddy_allocator_free :: proc(b: ^Buddy_Allocator, ptr: rawptr) -> Allocator_Erro return nil } +buddy_allocator_free_all :: proc(b: ^Buddy_Allocator) { + alignment := b.alignment + head := ([^]byte)(b.head) + tail := ([^]byte)(b.tail) + data := head[:ptr_sub(tail, head)] + buddy_allocator_init(b, data, alignment) +} + buddy_allocator_proc :: proc( allocator_data: rawptr, mode: Allocator_Mode, @@ -1312,7 +1320,6 @@ buddy_allocator_proc :: proc( old_size: int, loc := #caller_location, ) -> ([]byte, Allocator_Error) { - b := (^Buddy_Allocator)(allocator_data) switch mode { case .Alloc: @@ -1326,18 +1333,13 @@ buddy_allocator_proc :: proc( case .Free: 
return nil, buddy_allocator_free(b, old_memory) case .Free_All: - alignment := b.alignment - head := ([^]byte)(b.head) - tail := ([^]byte)(b.tail) - data := head[:ptr_sub(tail, head)] - buddy_allocator_init(b, data, alignment) + buddy_allocator_free_all(b) case .Query_Features: set := (^Allocator_Mode_Set)(old_memory) if set != nil { set^ = {.Query_Features, .Alloc, .Alloc_Non_Zeroed, .Resize, .Resize_Non_Zeroed, .Free, .Free_All, .Query_Info} } return nil, nil - case .Query_Info: info := (^Allocator_Query_Info)(old_memory) if info != nil && info.pointer != nil { @@ -1345,7 +1347,6 @@ buddy_allocator_proc :: proc( if !(b.head <= ptr && ptr <= b.tail) { return nil, .Invalid_Pointer } - block := (^Buddy_Block)(([^]byte)(ptr)[-b.alignment:]) info.size = int(block.size) info.alignment = int(b.alignment) @@ -1353,6 +1354,5 @@ buddy_allocator_proc :: proc( } return nil, nil } - return nil, nil } From 64814f4199c8d89a3fb0ed7013aa20321a0b34d5 Mon Sep 17 00:00:00 2001 From: flysand7 Date: Sat, 7 Sep 2024 14:19:50 +1100 Subject: [PATCH 15/35] [mem]: Document the package --- core/mem/doc.odin | 111 ++++++++++++++++++++++++------- core/mem/tracking_allocator.odin | 31 +++++++++ 2 files changed, 119 insertions(+), 23 deletions(-) diff --git a/core/mem/doc.odin b/core/mem/doc.odin index 44c93f798ed..b152d073847 100644 --- a/core/mem/doc.odin +++ b/core/mem/doc.odin @@ -1,34 +1,99 @@ /* -package mem implements various types of allocators. +The `mem` package implements various allocators and provides utility functions +for dealing with memory, pointers and slices. +The documentation below describes basic concepts, applicable to the `mem` +package. -An example of how to use the `Tracking_Allocator` to track subsequent allocations -in your program and report leaks and bad frees: +## Pointers, multipointers, and slices -Example: - package foo +A *pointer* is an abstraction of an *address*, a numberic value representing the +location of an object in memory. 
That object is said to be *pointed to* by the
+pointer. To obtain the address of a pointer, cast it to `uintptr`.
- import "core:mem"
- import "core:fmt"
+A multipointer is a pointer that points to multiple objects. Unlike a pointer,
+a multipointer can be indexed, but does not have a definite length. A slice is
+a pointer that points to multiple objects equipped with the length, specifying
+the number of objects a slice points to.
- _main :: proc() {
- // do stuff
- }
+When an object's value is read through a pointer, that operation is called a
+*load* operation. When memory is written through a pointer, that operation is
+called a *store* operation. Both of these operations can be called a *memory
+access operation*.
- main :: proc() {
- track: mem.Tracking_Allocator
- mem.tracking_allocator_init(&track, context.allocator)
- defer mem.tracking_allocator_destroy(&track)
- context.allocator = mem.tracking_allocator(&track)
+## Allocators
- _main()
+In C and C++ memory models, allocations of objects in memory are typically
+treated individually with a generic allocator (The `malloc` function). Which in
+some scenarios can lead to poor cache utilization, slowdowns on individual
+objects' memory management and growing complexity of the code needing to keep
+track of the pointers and their lifetimes.
- for _, leak in track.allocation_map {
- fmt.printf("%v leaked %m\n", leak.location, leak.size)
- }
- for bad_free in track.bad_free_array {
- fmt.printf("%v allocation %p was freed badly\n", bad_free.location, bad_free.memory)
- }
- }
+Using different kinds of *allocators* for different purposes can solve these
+problems. The allocators are typically optimized for specific use-cases and
+can potentially simplify the memory management code.
+ +For example, in the context of making a game, having an Arena allocator could +simplify allocations of any temporary memory, because the programmer doesn't +have to keep track of which objects need to be freed every time they are +allocated, because at the end of every frame the whole allocator is reset to +its initial state and all objects are freed at once. + +The allocators have different kinds of restrictions on object lifetimes, sizes, +alignment and can be a significant gain, if used properly. Odin supports +allocators on a language level. + +Operations such as `new`, `free` and `delete` by default will use +`context.allocator`, which can be overridden by the user. When an override +happens all called functions will inherit the new context and use the same +allocator. + +## Alignment + +An address is said to be *aligned to `N` bytes*, if the addresses's numeric +value is divisible by `N`. The number `N` in this case can be referred to as +the *alignment boundary*. Typically an alignment is a power of two integer +value. + +A *natural alignment* of an object is typically equal to its size. For example +a 16 bit integer has a natural alignment of 2 bytes. When an object is not +located on its natural alignment boundary, accesses to that object are +considered *unaligned*. + +Some machines issue a hardware **exception**, or experience **slowdowns** when a +memory access operation occurs from an unaligned address. Examples of such +operations are: + +- SIMD instructions on x86. These instructions require all memory accesses to be + on an address that is aligned to 16 bytes. +- On ARM unaligned loads have an extra cycle penalty. + +As such, many operations that allocate memory in this package allow to +explicitly specify the alignment of allocated pointers/slices. The default +alignment for all operations is specified in a constant `mem.DEFAULT_ALIGNMENT`. 
+ +## Zero by default + +Whenever new memory is allocated, via an allocator, or on the stack, by default +Odin will zero-initialize that memory, even if it wasn't explicitly +initialized. This allows for some convenience in certain scenarios and ease of +debugging, which will not be described in detail here. + +However zero-initialization can be a cause of slowdowns, when allocating large +buffers. For this reason, allocators have `*_non_zeroed` modes of allocation +that allow the user to request for uninitialized memory and will avoid a +relatively expensive zero-filling of the buffer. + +## Naming conventions + +The word `size` is used to denote the **size in bytes**. The word `length` is +used to denote the count of objects. + +Higher-level allocation functions follow the following naming scheme: + +- `new`: Allocates a single object +- `free`: Free a single object (opposite of `new`) +- `make`: Allocate a group of objects +- `delete`: Free a group of objects (opposite of `make`) */ package mem diff --git a/core/mem/tracking_allocator.odin b/core/mem/tracking_allocator.odin index 356180be1cf..e75844130da 100644 --- a/core/mem/tracking_allocator.odin +++ b/core/mem/tracking_allocator.odin @@ -18,6 +18,37 @@ Tracking_Allocator_Bad_Free_Entry :: struct { location: runtime.Source_Code_Location, } +/* +An example of how to use the `Tracking_Allocator` to track subsequent allocations +in your program and report leaks and bad frees: + +Example: + + package foo + + import "core:mem" + import "core:fmt" + + _main :: proc() { + // do stuff + } + + main :: proc() { + track: mem.Tracking_Allocator + mem.tracking_allocator_init(&track, context.allocator) + defer mem.tracking_allocator_destroy(&track) + context.allocator = mem.tracking_allocator(&track) + + _main() + + for _, leak in track.allocation_map { + fmt.printf("%v leaked %m\n", leak.location, leak.size) + } + for bad_free in track.bad_free_array { + fmt.printf("%v allocation %p was freed badly\n", bad_free.location, 
bad_free.memory) + } + } +*/ Tracking_Allocator :: struct { backing: Allocator, allocation_map: map[rawptr]Tracking_Allocator_Entry, From 2d988bbc5f21fd7e07926c93b01996a392b5a92d Mon Sep 17 00:00:00 2001 From: flysand7 Date: Sat, 7 Sep 2024 14:45:15 +1100 Subject: [PATCH 16/35] [mem]: Rename alloc to alloc_bytes and add alloc --- core/mem/allocators.odin | 295 ++++++++++++++++++++++++++++++++------- 1 file changed, 248 insertions(+), 47 deletions(-) diff --git a/core/mem/allocators.odin b/core/mem/allocators.odin index 5fedbd4d66d..acbc202e6da 100644 --- a/core/mem/allocators.odin +++ b/core/mem/allocators.odin @@ -57,8 +57,14 @@ init_arena :: proc(a: ^Arena, data: []byte) { } @(require_results) -arena_alloc :: proc(a: ^Arena, size: int, alignment := DEFAULT_ALIGNMENT) -> ([]byte, Allocator_Error) { - bytes, err := arena_alloc_non_zeroed(a, size, alignment) +arena_alloc :: proc(a: ^Arena, size: int, alignment := DEFAULT_ALIGNMENT) -> (rawptr, Allocator_Error) { + bytes, err := arena_alloc_bytes(a, size, alignment) + return raw_data(bytes), err +} + +@(require_results) +arena_alloc_bytes :: proc(a: ^Arena, size: int, alignment := DEFAULT_ALIGNMENT) -> ([]byte, Allocator_Error) { + bytes, err := arena_alloc_bytes_non_zeroed(a, size, alignment) if bytes != nil { zero_slice(bytes) } @@ -66,7 +72,13 @@ arena_alloc :: proc(a: ^Arena, size: int, alignment := DEFAULT_ALIGNMENT) -> ([] } @(require_results) -arena_alloc_non_zeroed :: proc(a: ^Arena, size: int, alignment := DEFAULT_ALIGNMENT) -> ([]byte, Allocator_Error) { +arena_alloc_non_zeroed :: proc(a: ^Arena, size: int, alignment := DEFAULT_ALIGNMENT) -> (rawptr, Allocator_Error) { + bytes, err := arena_alloc_bytes_non_zeroed(a, size, alignment) + return raw_data(bytes), err +} + +@(require_results) +arena_alloc_bytes_non_zeroed :: proc(a: ^Arena, size: int, alignment := DEFAULT_ALIGNMENT) -> ([]byte, Allocator_Error) { #no_bounds_check end := &a.data[a.offset] ptr := align_forward(end, uintptr(alignment)) total_size := 
size + ptr_sub((^byte)(ptr), (^byte)(end)) @@ -94,9 +106,9 @@ arena_allocator_proc :: proc( arena := cast(^Arena)allocator_data switch mode { case .Alloc: - return arena_alloc(arena, size, alignment) + return arena_alloc_bytes(arena, size, alignment) case .Alloc_Non_Zeroed: - return arena_alloc_non_zeroed(arena, size, alignment) + return arena_alloc_bytes_non_zeroed(arena, size, alignment) case .Free: return nil, .Mode_Not_Implemented case .Free_All: @@ -181,8 +193,19 @@ scratch_alloc :: proc( size: int, alignment := DEFAULT_ALIGNMENT, loc := #caller_location, +) -> (rawptr, Allocator_Error) { + bytes, err := scratch_alloc_bytes(s, size, alignment, loc) + return raw_data(bytes), err +} + +@(require_results) +scratch_alloc_bytes :: proc( + s: ^Scratch, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location, ) -> ([]byte, Allocator_Error) { - bytes, err := scratch_alloc_non_zeroed(s, size, alignment, loc) + bytes, err := scratch_alloc_bytes_non_zeroed(s, size, alignment, loc) if bytes != nil { zero_slice(bytes) } @@ -195,6 +218,17 @@ scratch_alloc_non_zeroed :: proc( size: int, alignment := DEFAULT_ALIGNMENT, loc := #caller_location, +) -> (rawptr, Allocator_Error) { + bytes, err := scratch_alloc_bytes_non_zeroed(s, size, alignment, loc) + return raw_data(bytes), err +} + +@(require_results) +scratch_alloc_bytes_non_zeroed :: proc( + s: ^Scratch, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location, ) -> ([]byte, Allocator_Error) { if s.data == nil { DEFAULT_BACKING_SIZE :: 4 * Megabyte @@ -296,8 +330,21 @@ scratch_resize :: proc( size: int, alignment := DEFAULT_ALIGNMENT, loc := #caller_location +) -> (rawptr, Allocator_Error) { + bytes, err := scratch_resize_bytes(s, old_memory, old_size, size, alignment, loc) + return raw_data(bytes), err +} + +@(require_results) +scratch_resize_bytes :: proc( + s: ^Scratch, + old_memory: rawptr, + old_size: int, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location ) -> 
([]byte, Allocator_Error) { - bytes, err := scratch_resize_non_zeroed(s, old_memory, old_size, size, alignment, loc) + bytes, err := scratch_resize_bytes_non_zeroed(s, old_memory, old_size, size, alignment, loc) if bytes != nil && size > old_size { zero_slice(bytes[size:]) } @@ -312,6 +359,19 @@ scratch_resize_non_zeroed :: proc( size: int, alignment := DEFAULT_ALIGNMENT, loc := #caller_location +) -> (rawptr, Allocator_Error) { + bytes, err := scratch_resize_bytes_non_zeroed(s, old_memory, old_size, size, alignment, loc) + return raw_data(bytes), err +} + +@(require_results) +scratch_resize_bytes_non_zeroed :: proc( + s: ^Scratch, + old_memory: rawptr, + old_size: int, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location ) -> ([]byte, Allocator_Error) { if s.data == nil { DEFAULT_BACKING_SIZE :: 4 * Megabyte @@ -328,7 +388,7 @@ scratch_resize_non_zeroed :: proc( s.curr_offset = int(old_ptr-begin)+size return byte_slice(old_memory, size), nil } - data, err := scratch_alloc_non_zeroed(s, size, alignment, loc) + data, err := scratch_alloc_bytes_non_zeroed(s, size, alignment, loc) if err != nil { return data, err } @@ -350,17 +410,17 @@ scratch_allocator_proc :: proc( size := size switch mode { case .Alloc: - return scratch_alloc(s, size, alignment, loc) + return scratch_alloc_bytes(s, size, alignment, loc) case .Alloc_Non_Zeroed: - return scratch_alloc_non_zeroed(s, size, alignment, loc) + return scratch_alloc_bytes_non_zeroed(s, size, alignment, loc) case .Free: return nil, scratch_free(s, old_memory, loc) case .Free_All: scratch_free_all(s, loc) case .Resize: - return scratch_resize(s, old_memory, old_size, size, alignment, loc) + return scratch_resize_bytes(s, old_memory, old_size, size, alignment, loc) case .Resize_Non_Zeroed: - return scratch_resize_non_zeroed(s, old_memory, old_size, size, alignment, loc) + return scratch_resize_bytes_non_zeroed(s, old_memory, old_size, size, alignment, loc) case .Query_Features: set := 
(^Allocator_Mode_Set)(old_memory) if set != nil { @@ -417,8 +477,19 @@ stack_alloc :: proc( size: int, alignment := DEFAULT_ALIGNMENT, loc := #caller_location +) -> (rawptr, Allocator_Error) { + bytes, err := stack_alloc_bytes(s, size, alignment, loc) + return raw_data(bytes), err +} + +@(require_results) +stack_alloc_bytes :: proc( + s: ^Stack, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location ) -> ([]byte, Allocator_Error) { - bytes, err := stack_alloc_non_zeroed(s, size, alignment, loc) + bytes, err := stack_alloc_bytes_non_zeroed(s, size, alignment, loc) if bytes != nil { zero_slice(bytes) } @@ -431,6 +502,17 @@ stack_alloc_non_zeroed :: proc( size: int, alignment := DEFAULT_ALIGNMENT, loc := #caller_location +) -> (rawptr, Allocator_Error) { + bytes, err := stack_alloc_bytes_non_zeroed(s, size, alignment, loc) + return raw_data(bytes), err +} + +@(require_results) +stack_alloc_bytes_non_zeroed :: proc( + s: ^Stack, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location ) -> ([]byte, Allocator_Error) { if s.data == nil { panic("Stack allocation on an uninitialized stack allocator", loc) @@ -496,6 +578,7 @@ stack_free_all :: proc(s: ^Stack, loc := #caller_location) { s.curr_offset = 0 } + @(require_results) stack_resize :: proc( s: ^Stack, @@ -504,8 +587,21 @@ stack_resize :: proc( size: int, alignment := DEFAULT_ALIGNMENT, loc := #caller_location, +) -> (rawptr, Allocator_Error) { + bytes, err := stack_resize_bytes(s, old_memory, old_size, size, alignment) + return raw_data(bytes), err +} + +@(require_results) +stack_resize_bytes :: proc( + s: ^Stack, + old_memory: rawptr, + old_size: int, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location, ) -> ([]byte, Allocator_Error) { - bytes, err := stack_alloc_non_zeroed(s, size, alignment, loc) + bytes, err := stack_alloc_bytes_non_zeroed(s, size, alignment, loc) if bytes != nil { if old_memory == nil { zero_slice(bytes) @@ -524,12 +620,25 @@ 
stack_resize_non_zeroed :: proc( size: int, alignment := DEFAULT_ALIGNMENT, loc := #caller_location, +) -> (rawptr, Allocator_Error) { + bytes, err := stack_resize_bytes_non_zeroed(s, old_memory, old_size, size, alignment) + return raw_data(bytes), err +} + +@(require_results) +stack_resize_bytes_non_zeroed :: proc( + s: ^Stack, + old_memory: rawptr, + old_size: int, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location, ) -> ([]byte, Allocator_Error) { if s.data == nil { panic("Stack free all on an uninitialized stack allocator", loc) } if old_memory == nil { - return stack_alloc_non_zeroed(s, size, alignment, loc) + return stack_alloc_bytes_non_zeroed(s, size, alignment, loc) } if size == 0 { return nil, nil @@ -550,7 +659,7 @@ stack_resize_non_zeroed :: proc( header := (^Stack_Allocation_Header)(curr_addr - size_of(Stack_Allocation_Header)) old_offset := int(curr_addr - uintptr(header.padding) - uintptr(raw_data(s.data))) if old_offset != header.prev_offset { - data, err := stack_alloc_non_zeroed(s, size, alignment, loc) + data, err := stack_alloc_bytes_non_zeroed(s, size, alignment, loc) if err == nil { runtime.copy(data, byte_slice(old_memory, old_size)) } @@ -581,17 +690,17 @@ stack_allocator_proc :: proc( } switch mode { case .Alloc: - return stack_alloc(s, size, alignment, loc) + return stack_alloc_bytes(s, size, alignment, loc) case .Alloc_Non_Zeroed: - return stack_alloc_non_zeroed(s, size, alignment, loc) + return stack_alloc_bytes_non_zeroed(s, size, alignment, loc) case .Free: return nil, stack_free(s, old_memory, loc) case .Free_All: stack_free_all(s, loc) case .Resize: - return stack_resize(s, old_memory, old_size, size, alignment, loc) + return stack_resize_bytes(s, old_memory, old_size, size, alignment, loc) case .Resize_Non_Zeroed: - return stack_resize_non_zeroed(s, old_memory, old_size, size, alignment, loc) + return stack_resize_bytes_non_zeroed(s, old_memory, old_size, size, alignment, loc) case .Query_Features: set := 
(^Allocator_Mode_Set)(old_memory) if set != nil { @@ -644,8 +753,19 @@ small_stack_alloc :: proc( size: int, alignment := DEFAULT_ALIGNMENT, loc := #caller_location, +) -> (rawptr, Allocator_Error) { + bytes, err := small_stack_alloc_bytes(s, size, alignment, loc) + return raw_data(bytes), err +} + +@(require_results) +small_stack_alloc_bytes :: proc( + s: ^Small_Stack, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location, ) -> ([]byte, Allocator_Error) { - bytes, err := small_stack_alloc_non_zeroed(s, size, alignment, loc) + bytes, err := small_stack_alloc_bytes_non_zeroed(s, size, alignment, loc) if bytes != nil { zero_slice(bytes) } @@ -658,6 +778,17 @@ small_stack_alloc_non_zeroed :: proc( size: int, alignment := DEFAULT_ALIGNMENT, loc := #caller_location, +) -> (rawptr, Allocator_Error) { + bytes, err := small_stack_alloc_bytes_non_zeroed(s, size, alignment, loc) + return raw_data(bytes), err +} + +@(require_results) +small_stack_alloc_bytes_non_zeroed :: proc( + s: ^Small_Stack, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location, ) -> ([]byte, Allocator_Error) { if s.data == nil { return nil, .Invalid_Argument @@ -716,8 +847,21 @@ small_stack_resize :: proc( size: int, alignment := DEFAULT_ALIGNMENT, loc := #caller_location, +) -> (rawptr, Allocator_Error) { + bytes, err := small_stack_resize_bytes(s, old_memory, old_size, size, alignment, loc) + return raw_data(bytes), err +} + +@(require_results) +small_stack_resize_bytes :: proc( + s: ^Small_Stack, + old_memory: rawptr, + old_size: int, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location, ) -> ([]byte, Allocator_Error) { - bytes, err := small_stack_resize_non_zeroed(s, old_memory, old_size, size, alignment, loc) + bytes, err := small_stack_resize_bytes_non_zeroed(s, old_memory, old_size, size, alignment, loc) if bytes != nil { if old_memory == nil { zero_slice(bytes) @@ -736,11 +880,24 @@ small_stack_resize_non_zeroed :: proc( size: int, 
alignment := DEFAULT_ALIGNMENT, loc := #caller_location, +) -> (rawptr, Allocator_Error) { + bytes, err := small_stack_resize_bytes_non_zeroed(s, old_memory, old_size, size, alignment, loc) + return raw_data(bytes), err +} + +@(require_results) +small_stack_resize_bytes_non_zeroed :: proc( + s: ^Small_Stack, + old_memory: rawptr, + old_size: int, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location, ) -> ([]byte, Allocator_Error) { alignment := alignment alignment = clamp(alignment, 1, 8*size_of(Stack_Allocation_Header{}.padding)/2) if old_memory == nil { - return small_stack_alloc_non_zeroed(s, size, alignment, loc) + return small_stack_alloc_bytes_non_zeroed(s, size, alignment, loc) } if size == 0 { return nil, nil @@ -759,7 +916,7 @@ small_stack_resize_non_zeroed :: proc( if old_size == size { return byte_slice(old_memory, size), nil } - data, err := small_stack_alloc_non_zeroed(s, size, alignment, loc) + data, err := small_stack_alloc_bytes_non_zeroed(s, size, alignment, loc) if err == nil { runtime.copy(data, byte_slice(old_memory, old_size)) } @@ -781,17 +938,17 @@ small_stack_allocator_proc :: proc( } switch mode { case .Alloc: - return small_stack_alloc(s, size, alignment, loc) + return small_stack_alloc_bytes(s, size, alignment, loc) case .Alloc_Non_Zeroed: - return small_stack_alloc_non_zeroed(s, size, alignment, loc) + return small_stack_alloc_bytes_non_zeroed(s, size, alignment, loc) case .Free: return nil, small_stack_free(s, old_memory, loc) case .Free_All: small_stack_free_all(s) case .Resize: - return small_stack_resize(s, old_memory, old_size, size, alignment, loc) + return small_stack_resize_bytes(s, old_memory, old_size, size, alignment, loc) case .Resize_Non_Zeroed: - return small_stack_resize_non_zeroed(s, old_memory, old_size, size, alignment, loc) + return small_stack_resize_bytes_non_zeroed(s, old_memory, old_size, size, alignment, loc) case .Query_Features: set := (^Allocator_Mode_Set)(old_memory) if set != nil { @@ 
-805,19 +962,21 @@ small_stack_allocator_proc :: proc( } -/* old stuff */ +/* Preserved for compatibility */ Dynamic_Pool :: Dynamic_Arena DYNAMIC_POOL_BLOCK_SIZE_DEFAULT :: DYNAMIC_ARENA_BLOCK_SIZE_DEFAULT DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT :: DYNAMIC_ARENA_OUT_OF_BAND_SIZE_DEFAULT dynamic_pool_allocator_proc :: dynamic_arena_allocator_proc dynamic_pool_free_all :: dynamic_arena_free_all dynamic_pool_reset :: dynamic_arena_reset -dynamic_pool_alloc_bytes :: dynamic_arena_alloc -dynamic_pool_alloc :: _dynamic_arena_alloc_ptr +dynamic_pool_alloc_bytes :: dynamic_arena_alloc_bytes +dynamic_pool_alloc :: dynamic_arena_alloc dynamic_pool_init :: dynamic_arena_init dynamic_pool_allocator :: dynamic_arena_allocator dynamic_pool_destroy :: dynamic_arena_destroy + + DYNAMIC_ARENA_BLOCK_SIZE_DEFAULT :: 65536 DYNAMIC_ARENA_OUT_OF_BAND_SIZE_DEFAULT :: 6554 @@ -897,14 +1056,14 @@ _dynamic_arena_cycle_new_block :: proc(p: ^Dynamic_Arena, loc := #caller_locatio } @(private, require_results) -_dynamic_arena_alloc_ptr :: proc(a: ^Dynamic_Arena, size: int, loc := #caller_location) -> (rawptr, Allocator_Error) { - data, err := dynamic_arena_alloc(a, size, loc) +dynamic_arena_alloc :: proc(a: ^Dynamic_Arena, size: int, loc := #caller_location) -> (rawptr, Allocator_Error) { + data, err := dynamic_arena_alloc_bytes(a, size, loc) return raw_data(data), err } @(require_results) -dynamic_arena_alloc :: proc(a: ^Dynamic_Arena, size: int, loc := #caller_location) -> ([]byte, Allocator_Error) { - bytes, err := dynamic_arena_alloc_non_zeroed(a, size, loc) +dynamic_arena_alloc_bytes :: proc(a: ^Dynamic_Arena, size: int, loc := #caller_location) -> ([]byte, Allocator_Error) { + bytes, err := dynamic_arena_alloc_bytes_non_zeroed(a, size, loc) if bytes != nil { zero_slice(bytes) } @@ -912,7 +1071,13 @@ dynamic_arena_alloc :: proc(a: ^Dynamic_Arena, size: int, loc := #caller_locatio } @(require_results) -dynamic_arena_alloc_non_zeroed :: proc(a: ^Dynamic_Arena, size: int, loc := 
#caller_location) -> ([]byte, Allocator_Error) { +dynamic_arena_alloc_non_zeroed :: proc(a: ^Dynamic_Arena, size: int, loc := #caller_location) -> (rawptr, Allocator_Error) { + data, err := dynamic_arena_alloc_bytes_non_zeroed(a, size, loc) + return raw_data(data), err +} + +@(require_results) +dynamic_arena_alloc_bytes_non_zeroed :: proc(a: ^Dynamic_Arena, size: int, loc := #caller_location) -> ([]byte, Allocator_Error) { n := align_formula(size, a.alignment) if n > a.block_size { return nil, .Invalid_Argument @@ -971,8 +1136,20 @@ dynamic_arena_resize :: proc( old_size: int, size: int, loc := #caller_location, +) -> (rawptr, Allocator_Error) { + bytes, err := dynamic_arena_resize_bytes(a, old_memory, old_size, size, loc) + return raw_data(bytes), err +} + +@(require_results) +dynamic_arena_resize_bytes :: proc( + a: ^Dynamic_Arena, + old_memory: rawptr, + old_size: int, + size: int, + loc := #caller_location, ) -> ([]byte, Allocator_Error) { - bytes, err := dynamic_arena_resize_non_zeroed(a, old_memory, old_size, size, loc) + bytes, err := dynamic_arena_resize_bytes_non_zeroed(a, old_memory, old_size, size, loc) if bytes != nil { if old_memory == nil { zero_slice(bytes) @@ -990,11 +1167,23 @@ dynamic_arena_resize_non_zeroed :: proc( old_size: int, size: int, loc := #caller_location, +) -> (rawptr, Allocator_Error) { + bytes, err := dynamic_arena_resize_bytes_non_zeroed(a, old_memory, old_size, size, loc) + return raw_data(bytes), err +} + +@(require_results) +dynamic_arena_resize_bytes_non_zeroed :: proc( + a: ^Dynamic_Arena, + old_memory: rawptr, + old_size: int, + size: int, + loc := #caller_location, ) -> ([]byte, Allocator_Error) { if old_size >= size { return byte_slice(old_memory, size), nil } - data, err := dynamic_arena_alloc_non_zeroed(a, size, loc) + data, err := dynamic_arena_alloc_bytes_non_zeroed(a, size, loc) if err == nil { runtime.copy(data, byte_slice(old_memory, old_size)) } @@ -1013,17 +1202,17 @@ dynamic_arena_allocator_proc :: proc( arena := 
(^Dynamic_Arena)(allocator_data) switch mode { case .Alloc: - return dynamic_arena_alloc(arena, size, loc) + return dynamic_arena_alloc_bytes(arena, size, loc) case .Alloc_Non_Zeroed: - return dynamic_arena_alloc_non_zeroed(arena, size, loc) + return dynamic_arena_alloc_bytes_non_zeroed(arena, size, loc) case .Free: return nil, .Mode_Not_Implemented case .Free_All: dynamic_arena_free_all(arena, loc) case .Resize: - return dynamic_arena_resize(arena, old_memory, old_size, size, loc) + return dynamic_arena_resize_bytes(arena, old_memory, old_size, size, loc) case .Resize_Non_Zeroed: - return dynamic_arena_resize_non_zeroed(arena, old_memory, old_size, size, loc) + return dynamic_arena_resize_bytes_non_zeroed(arena, old_memory, old_size, size, loc) case .Query_Features: set := (^Allocator_Mode_Set)(old_memory) if set != nil { @@ -1263,8 +1452,14 @@ buddy_block_size_required :: proc(b: ^Buddy_Allocator, size: uint) -> uint { } @(require_results) -buddy_allocator_alloc :: proc(b: ^Buddy_Allocator, size: uint) -> ([]byte, Allocator_Error) { - bytes, err := buddy_allocator_alloc_non_zeroed(b, size) +buddy_allocator_alloc :: proc(b: ^Buddy_Allocator, size: uint) -> (rawptr, Allocator_Error) { + bytes, err := buddy_allocator_alloc_bytes(b, size) + return raw_data(bytes), err +} + +@(require_results) +buddy_allocator_alloc_bytes :: proc(b: ^Buddy_Allocator, size: uint) -> ([]byte, Allocator_Error) { + bytes, err := buddy_allocator_alloc_bytes_non_zeroed(b, size) if bytes != nil { zero_slice(bytes) } @@ -1272,7 +1467,13 @@ buddy_allocator_alloc :: proc(b: ^Buddy_Allocator, size: uint) -> ([]byte, Alloc } @(require_results) -buddy_allocator_alloc_non_zeroed :: proc(b: ^Buddy_Allocator, size: uint) -> ([]byte, Allocator_Error) { +buddy_allocator_alloc_non_zeroed :: proc(b: ^Buddy_Allocator, size: uint) -> (rawptr, Allocator_Error) { + bytes, err := buddy_allocator_alloc_bytes_non_zeroed(b, size) + return raw_data(bytes), err +} + +@(require_results) 
+buddy_allocator_alloc_bytes_non_zeroed :: proc(b: ^Buddy_Allocator, size: uint) -> ([]byte, Allocator_Error) { if size != 0 { actual_size := buddy_block_size_required(b, size) found := buddy_block_find_best(b.head, b.tail, actual_size) @@ -1323,9 +1524,9 @@ buddy_allocator_proc :: proc( b := (^Buddy_Allocator)(allocator_data) switch mode { case .Alloc: - return buddy_allocator_alloc(b, uint(size)) + return buddy_allocator_alloc_bytes(b, uint(size)) case .Alloc_Non_Zeroed: - return buddy_allocator_alloc_non_zeroed(b, uint(size)) + return buddy_allocator_alloc_bytes_non_zeroed(b, uint(size)) case .Resize: return default_resize_bytes_align(byte_slice(old_memory, old_size), size, alignment, buddy_allocator(b)) case .Resize_Non_Zeroed: From 6017a20e1cde4c218ab05a754c747b6864e87394 Mon Sep 17 00:00:00 2001 From: flysand7 Date: Sat, 7 Sep 2024 15:11:04 +1100 Subject: [PATCH 17/35] [mem]: Make resize_bytes take a slice for the old memory --- core/mem/allocators.odin | 90 ++++++++++++++++++++-------------------- 1 file changed, 45 insertions(+), 45 deletions(-) diff --git a/core/mem/allocators.odin b/core/mem/allocators.odin index acbc202e6da..34b89fcb857 100644 --- a/core/mem/allocators.odin +++ b/core/mem/allocators.odin @@ -331,21 +331,20 @@ scratch_resize :: proc( alignment := DEFAULT_ALIGNMENT, loc := #caller_location ) -> (rawptr, Allocator_Error) { - bytes, err := scratch_resize_bytes(s, old_memory, old_size, size, alignment, loc) + bytes, err := scratch_resize_bytes(s, byte_slice(old_memory, old_size), size, alignment, loc) return raw_data(bytes), err } @(require_results) scratch_resize_bytes :: proc( s: ^Scratch, - old_memory: rawptr, - old_size: int, + old_data: []byte, size: int, alignment := DEFAULT_ALIGNMENT, loc := #caller_location ) -> ([]byte, Allocator_Error) { - bytes, err := scratch_resize_bytes_non_zeroed(s, old_memory, old_size, size, alignment, loc) - if bytes != nil && size > old_size { + bytes, err := scratch_resize_bytes_non_zeroed(s, old_data, 
size, alignment, loc) + if bytes != nil && size > len(old_data) { zero_slice(bytes[size:]) } return bytes, err @@ -360,19 +359,20 @@ scratch_resize_non_zeroed :: proc( alignment := DEFAULT_ALIGNMENT, loc := #caller_location ) -> (rawptr, Allocator_Error) { - bytes, err := scratch_resize_bytes_non_zeroed(s, old_memory, old_size, size, alignment, loc) + bytes, err := scratch_resize_bytes_non_zeroed(s, byte_slice(old_memory, old_size), size, alignment, loc) return raw_data(bytes), err } @(require_results) scratch_resize_bytes_non_zeroed :: proc( s: ^Scratch, - old_memory: rawptr, - old_size: int, + old_data: []byte, size: int, alignment := DEFAULT_ALIGNMENT, loc := #caller_location ) -> ([]byte, Allocator_Error) { + old_memory := raw_data(old_data) + old_size := len(old_data) if s.data == nil { DEFAULT_BACKING_SIZE :: 4 * Megabyte if !(context.allocator.procedure != scratch_allocator_proc && context.allocator.data != s) { @@ -418,9 +418,9 @@ scratch_allocator_proc :: proc( case .Free_All: scratch_free_all(s, loc) case .Resize: - return scratch_resize_bytes(s, old_memory, old_size, size, alignment, loc) + return scratch_resize_bytes(s, byte_slice(old_memory, old_size), size, alignment, loc) case .Resize_Non_Zeroed: - return scratch_resize_bytes_non_zeroed(s, old_memory, old_size, size, alignment, loc) + return scratch_resize_bytes_non_zeroed(s, byte_slice(old_memory, old_size), size, alignment, loc) case .Query_Features: set := (^Allocator_Mode_Set)(old_memory) if set != nil { @@ -588,25 +588,24 @@ stack_resize :: proc( alignment := DEFAULT_ALIGNMENT, loc := #caller_location, ) -> (rawptr, Allocator_Error) { - bytes, err := stack_resize_bytes(s, old_memory, old_size, size, alignment) + bytes, err := stack_resize_bytes(s, byte_slice(old_memory, old_size), size, alignment) return raw_data(bytes), err } @(require_results) stack_resize_bytes :: proc( s: ^Stack, - old_memory: rawptr, - old_size: int, + old_data: []byte, size: int, alignment := DEFAULT_ALIGNMENT, loc := 
#caller_location, ) -> ([]byte, Allocator_Error) { bytes, err := stack_alloc_bytes_non_zeroed(s, size, alignment, loc) if bytes != nil { - if old_memory == nil { + if old_data == nil { zero_slice(bytes) - } else if size > old_size { - zero_slice(bytes[old_size:]) + } else if size > len(old_data) { + zero_slice(bytes[len(old_data):]) } } return bytes, err @@ -621,19 +620,20 @@ stack_resize_non_zeroed :: proc( alignment := DEFAULT_ALIGNMENT, loc := #caller_location, ) -> (rawptr, Allocator_Error) { - bytes, err := stack_resize_bytes_non_zeroed(s, old_memory, old_size, size, alignment) + bytes, err := stack_resize_bytes_non_zeroed(s, byte_slice(old_memory, old_size), size, alignment) return raw_data(bytes), err } @(require_results) stack_resize_bytes_non_zeroed :: proc( s: ^Stack, - old_memory: rawptr, - old_size: int, + old_data: []byte, size: int, alignment := DEFAULT_ALIGNMENT, loc := #caller_location, ) -> ([]byte, Allocator_Error) { + old_memory := raw_data(old_data) + old_size := len(old_data) if s.data == nil { panic("Stack free all on an uninitialized stack allocator", loc) } @@ -698,9 +698,9 @@ stack_allocator_proc :: proc( case .Free_All: stack_free_all(s, loc) case .Resize: - return stack_resize_bytes(s, old_memory, old_size, size, alignment, loc) + return stack_resize_bytes(s, byte_slice(old_memory, old_size), size, alignment, loc) case .Resize_Non_Zeroed: - return stack_resize_bytes_non_zeroed(s, old_memory, old_size, size, alignment, loc) + return stack_resize_bytes_non_zeroed(s, byte_slice(old_memory, old_size), size, alignment, loc) case .Query_Features: set := (^Allocator_Mode_Set)(old_memory) if set != nil { @@ -848,25 +848,24 @@ small_stack_resize :: proc( alignment := DEFAULT_ALIGNMENT, loc := #caller_location, ) -> (rawptr, Allocator_Error) { - bytes, err := small_stack_resize_bytes(s, old_memory, old_size, size, alignment, loc) + bytes, err := small_stack_resize_bytes(s, byte_slice(old_memory, old_size), size, alignment, loc) return 
raw_data(bytes), err } @(require_results) small_stack_resize_bytes :: proc( s: ^Small_Stack, - old_memory: rawptr, - old_size: int, + old_data: []byte, size: int, alignment := DEFAULT_ALIGNMENT, loc := #caller_location, ) -> ([]byte, Allocator_Error) { - bytes, err := small_stack_resize_bytes_non_zeroed(s, old_memory, old_size, size, alignment, loc) + bytes, err := small_stack_resize_bytes_non_zeroed(s, old_data, size, alignment, loc) if bytes != nil { - if old_memory == nil { + if old_data == nil { zero_slice(bytes) - } else if size > old_size { - zero_slice(bytes[old_size:]) + } else if size > len(old_data) { + zero_slice(bytes[len(old_data):]) } } return bytes, err @@ -881,19 +880,20 @@ small_stack_resize_non_zeroed :: proc( alignment := DEFAULT_ALIGNMENT, loc := #caller_location, ) -> (rawptr, Allocator_Error) { - bytes, err := small_stack_resize_bytes_non_zeroed(s, old_memory, old_size, size, alignment, loc) + bytes, err := small_stack_resize_bytes_non_zeroed(s, byte_slice(old_memory, old_size), size, alignment, loc) return raw_data(bytes), err } @(require_results) small_stack_resize_bytes_non_zeroed :: proc( s: ^Small_Stack, - old_memory: rawptr, - old_size: int, + old_data: []byte, size: int, alignment := DEFAULT_ALIGNMENT, loc := #caller_location, ) -> ([]byte, Allocator_Error) { + old_memory := raw_data(old_data) + old_size := len(old_data) alignment := alignment alignment = clamp(alignment, 1, 8*size_of(Stack_Allocation_Header{}.padding)/2) if old_memory == nil { @@ -946,9 +946,9 @@ small_stack_allocator_proc :: proc( case .Free_All: small_stack_free_all(s) case .Resize: - return small_stack_resize_bytes(s, old_memory, old_size, size, alignment, loc) + return small_stack_resize_bytes(s, byte_slice(old_memory, old_size), size, alignment, loc) case .Resize_Non_Zeroed: - return small_stack_resize_bytes_non_zeroed(s, old_memory, old_size, size, alignment, loc) + return small_stack_resize_bytes_non_zeroed(s, byte_slice(old_memory, old_size), size, alignment, 
loc) case .Query_Features: set := (^Allocator_Mode_Set)(old_memory) if set != nil { @@ -1137,24 +1137,23 @@ dynamic_arena_resize :: proc( size: int, loc := #caller_location, ) -> (rawptr, Allocator_Error) { - bytes, err := dynamic_arena_resize_bytes(a, old_memory, old_size, size, loc) + bytes, err := dynamic_arena_resize_bytes(a, byte_slice(old_memory, old_size), size, loc) return raw_data(bytes), err } @(require_results) dynamic_arena_resize_bytes :: proc( a: ^Dynamic_Arena, - old_memory: rawptr, - old_size: int, + old_data: []byte, size: int, loc := #caller_location, ) -> ([]byte, Allocator_Error) { - bytes, err := dynamic_arena_resize_bytes_non_zeroed(a, old_memory, old_size, size, loc) + bytes, err := dynamic_arena_resize_bytes_non_zeroed(a, old_data, size, loc) if bytes != nil { - if old_memory == nil { + if old_data == nil { zero_slice(bytes) - } else if size > old_size { - zero_slice(bytes[old_size:]) + } else if size > len(old_data) { + zero_slice(bytes[len(old_data):]) } } return bytes, err @@ -1168,18 +1167,19 @@ dynamic_arena_resize_non_zeroed :: proc( size: int, loc := #caller_location, ) -> (rawptr, Allocator_Error) { - bytes, err := dynamic_arena_resize_bytes_non_zeroed(a, old_memory, old_size, size, loc) + bytes, err := dynamic_arena_resize_bytes_non_zeroed(a, byte_slice(old_memory, old_size), size, loc) return raw_data(bytes), err } @(require_results) dynamic_arena_resize_bytes_non_zeroed :: proc( a: ^Dynamic_Arena, - old_memory: rawptr, - old_size: int, + old_data: []byte, size: int, loc := #caller_location, ) -> ([]byte, Allocator_Error) { + old_memory := raw_data(old_data) + old_size := len(old_data) if old_size >= size { return byte_slice(old_memory, size), nil } @@ -1210,9 +1210,9 @@ dynamic_arena_allocator_proc :: proc( case .Free_All: dynamic_arena_free_all(arena, loc) case .Resize: - return dynamic_arena_resize_bytes(arena, old_memory, old_size, size, loc) + return dynamic_arena_resize_bytes(arena, byte_slice(old_memory, old_size), size, 
loc) case .Resize_Non_Zeroed: - return dynamic_arena_resize_bytes_non_zeroed(arena, old_memory, old_size, size, loc) + return dynamic_arena_resize_bytes_non_zeroed(arena, byte_slice(old_memory, old_size), size, loc) case .Query_Features: set := (^Allocator_Mode_Set)(old_memory) if set != nil { From 7c9d2f61f58f9ccb730da335dbd6573ec6e844b4 Mon Sep 17 00:00:00 2001 From: flysand7 Date: Sat, 7 Sep 2024 15:16:20 +1100 Subject: [PATCH 18/35] [mem]: Update package documentation --- core/mem/doc.odin | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/core/mem/doc.odin b/core/mem/doc.odin index b152d073847..5e8bcce6a8c 100644 --- a/core/mem/doc.odin +++ b/core/mem/doc.odin @@ -1,5 +1,5 @@ /* -The `mem` package implements various allocators and provides utility functions +The `mem` package implements various allocators and provides utility procedures for dealing with memory, pointers and slices. The documentation below describes basic concepts, applicable to the `mem` @@ -24,7 +24,7 @@ access operation*. ## Allocators In C and C++ memory models, allocations of objects in memory are typically -treated individually with a generic allocator (The `malloc` function). Which in +treated individually with a generic allocator (The `malloc` procedure). Which in some scenarios can lead to poor cache utilization, slowdowns on individual objects' memory management and growing complexity of the code needing to keep track of the pointers and their lifetimes. @@ -45,7 +45,7 @@ allocators on a language level. Operations such as `new`, `free` and `delete` by default will use `context.allocator`, which can be overridden by the user. When an override -happens all called functions will inherit the new context and use the same +happens all called procedures will inherit the new context and use the same allocator. ## Alignment @@ -89,7 +89,17 @@ relatively expensive zero-filling of the buffer. The word `size` is used to denote the **size in bytes**. 
The word `length` is used to denote the count of objects. -Higher-level allocation functions follow the following naming scheme: +The allocation procedures use the following conventions: + +- If the name contains `alloc_bytes` or `resize_bytes`, then the procedure takes + in slice parameters and returns slices. +- If the procedure name contains `alloc` or `resize`, then the procedure takes + in a raw pointer and returns raw pointers. +- If the procedure name contains `free_bytes`, then the procedure takes in a + slice. +- If the procedure name contains `free`, then the procedure takes in a pointer. + +Higher-level allocation procedures follow the following naming scheme: - `new`: Allocates a single object - `free`: Free a single object (opposite of `new`) From 3a351ec407af42aeb82ac4fb51f9b633422f59fb Mon Sep 17 00:00:00 2001 From: flysand7 Date: Sat, 7 Sep 2024 18:01:41 +1100 Subject: [PATCH 19/35] [mem]: Document mem.odin --- core/mem/mem.odin | 448 +++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 404 insertions(+), 44 deletions(-) diff --git a/core/mem/mem.odin b/core/mem/mem.odin index 9e47c9602c3..c17ab43a9d4 100644 --- a/core/mem/mem.odin +++ b/core/mem/mem.odin @@ -3,23 +3,99 @@ package mem import "base:runtime" import "base:intrinsics" -Byte :: runtime.Byte +/* +The size, in bytes, of a single byte. + +This constant is equal to the value of `1`. +*/ +Byte :: runtime.Byte + +/* +The size, in bytes, of one kilobyte. + +This constant is equal to the amount of bytes in one kilobyte (also known as +kibibyte), which is equal to 1024 bytes. +*/ Kilobyte :: runtime.Kilobyte + +/* +The size, in bytes, of one megabyte. + +This constant is equal to the amount of bytes in one megabyte (also known as +mebibyte), which is equal to 1024 kilobyte. +*/ Megabyte :: runtime.Megabyte + +/* +The size, in bytes, of one gigabyte. + +This constant is equal to the amount of bytes in one gigabyte (also known as +gibiibyte), which is equal to 1024 megabytes. 
+*/ Gigabyte :: runtime.Gigabyte + +/* +The size, in bytes, of one terabyte. + +This constant is equal to the amount of bytes in one terabyte (also known as +tebibyte), which is equal to 1024 gigabytes. +*/ Terabyte :: runtime.Terabyte + +/* +The size, in bytes, of one petabyte. + +This constant is equal to the amount of bytes in one petabyte (also known as +pebibyte), which is equal to 1024 terabytes. +*/ Petabyte :: runtime.Petabyte -Exabyte :: runtime.Exabyte +/* +The size, in bytes, of one exabyte. + +This constant is equal to the amount of bytes in one exabyte (also known as +exbibyte), which is equal to 1024 petabytes. +*/ +Exabyte :: runtime.Exabyte + +/* +Set each byte of a memory range to a specific value. + +This procedure copies value specified by the `value` parameter into each of the +`len` bytes of a memory range, located at address `data`. + +This procedure returns the pointer to `data`. +*/ set :: proc "contextless" (data: rawptr, value: byte, len: int) -> rawptr { return runtime.memset(data, i32(value), len) } +/* +Set each byte of a memory range to zero. + +This procedure copies the value `0` into the `len` bytes of a memory range, +starting at address `data`. + +This procedure returns the pointer to `data`. +*/ zero :: proc "contextless" (data: rawptr, len: int) -> rawptr { intrinsics.mem_zero(data, len) return data } +/* +Set each byte of a memory range to zero. + +This procedure copies the value `0` into the `len` bytes of a memory range, +starting at address `data`. + +This procedure returns the pointer to `data`. + +Unlike the `zero()` procedure, which can be optimized away or reordered by the +compiler under certain circumstances, `zero_explicit()` procedure can not be +optimized away or reordered with other memory access operations, and the +compiler assumes volatile semantics of the memory.
+*/ zero_explicit :: proc "contextless" (data: rawptr, len: int) -> rawptr { // This routine tries to avoid the compiler optimizing away the call, // so that it is always executed. It is intended to provided @@ -30,26 +106,82 @@ zero_explicit :: proc "contextless" (data: rawptr, len: int) -> rawptr { return data } +/* +Zero-fill the memory of an object. + +This procedure sets each byte of the object pointed to by the pointer `item` +to zero, and returns the pointer to `item`. +*/ zero_item :: proc "contextless" (item: $P/^$T) -> P { intrinsics.mem_zero(item, size_of(T)) return item } +/* +Zero-fill the memory of the slice. + +This procedure sets each byte of the slice pointed to by the slice `data` +to zero, and returns the slice `data`. +*/ zero_slice :: proc "contextless" (data: $T/[]$E) -> T { zero(raw_data(data), size_of(E)*len(data)) return data } +/* +Copy bytes from one memory range to another. + +This procedure copies `len` bytes of data, from the memory range pointed to by +the `src` pointer into the memory range pointed to by the `dst` pointer, and +returns the `dst` pointer. +*/ copy :: proc "contextless" (dst, src: rawptr, len: int) -> rawptr { intrinsics.mem_copy(dst, src, len) return dst } +/* +Copy bytes between two non-overlapping memory ranges. + +This procedure copies `len` bytes of data, from the memory range pointed to by +the `src` pointer into the memory range pointed to by the `dst` pointer, and +returns the `dst` pointer. + +This is a slightly more optimized version of the `copy` procedure that requires +that memory ranges specified by the parameters to this procedure are not +overlapping. If the memory ranges specified by `dst` and `src` pointers overlap, +the behavior of this function may be unpredictable. +*/ copy_non_overlapping :: proc "contextless" (dst, src: rawptr, len: int) -> rawptr { intrinsics.mem_copy_non_overlapping(dst, src, len) return dst } +/* +Compare two memory ranges defined by slices. 
+ +This procedure performs a byte-by-byte comparison between memory ranges +specified by slices `a` and `b`, and returns a value, specifying their relative +ordering. + +If the return value is: +- Equal to `-1`, then `a` is "smaller" than `b`. +- Equal to `+1`, then `a` is "bigger" than `b`. +- Equal to `0`, then `a` and `b` are equal. + +The comparison is performed as follows: +1. Each byte, up to `min(len(a), len(b))` bytes is compared between `a` and `b`. + - If the byte in slice `a` is smaller than a byte in slice `b`, then comparison + stops and this procedure returns `-1`. + - If the byte in slice `a` is bigger than a byte in slice `b`, then comparison + stops and this procedure returns `+1`. + - Otherwise the comparison continues until `min(len(a), len(b))` are compared. +2. If all the bytes in the range are equal, then the lengths of the slices are + compared. + - If the length of slice `a` is smaller than the length of slice `b`, then `-1` is returned. + - If the length of slice `b` is smaller than the length of slice `a`, then `+1` is returned. + - Otherwise `0` is returned. +*/ @(require_results) compare :: proc "contextless" (a, b: []byte) -> int { res := compare_byte_ptrs(raw_data(a), raw_data(b), min(len(a), len(b))) @@ -61,16 +193,89 @@ compare :: proc "contextless" (a, b: []byte) -> int { return res } +/* +Compare two memory ranges defined by byte pointers. + +This procedure performs a byte-by-byte comparison between memory ranges of size +`n` located at addresses `a` and `b`, and returns a value, specifying their relative +ordering. + +If the return value is: +- Equal to `-1`, then `a` is "smaller" than `b`. +- Equal to `+1`, then `a` is "bigger" than `b`. +- Equal to `0`, then `a` and `b` are equal. + +The comparison is performed as follows: +1. Each byte, up to `n` bytes is compared between `a` and `b`. + - If the byte in `a` is smaller than a byte in `b`, then comparison stops + and this procedure returns `-1`.
+ - If the byte in `a` is bigger than a byte in `b`, then comparison stops + and this procedure returns `+1`. + - Otherwise the comparison continues until `n` bytes are compared. +2. If all the bytes in the range are equal, this procedure returns `0`. +*/ @(require_results) compare_byte_ptrs :: proc "contextless" (a, b: ^byte, n: int) -> int #no_bounds_check { return runtime.memory_compare(a, b, n) } +/* +Compare two memory ranges defined by pointers. + +This procedure performs a byte-by-byte comparison between memory ranges of size +`n` located at addresses `a` and `b`, and returns a value, specifying their relative +ordering. + +If the return value is: +- Equal to `-1`, then `a` is "smaller" than `b`. +- Equal to `+1`, then `a` is "bigger" than `b`. +- Equal to `0`, then `a` and `b` are equal. + +The comparison is performed as follows: +1. Each byte, upto `n` bytes is compared between `a` and `b`. + - If the byte in `a` is smaller than a byte in `b`, then comparison stops + and this procedure returns `-1`. + - If the byte in `a` is bigger than a byte in `b`, then comparison stops + and this procedure returns `+1`. + - Otherwise the comparison continues until `n` bytes are compared. +2. If all the bytes in the range are equal, this procedure returns `0`. +*/ +@(require_results) +compare_ptrs :: proc "contextless" (a, b: rawptr, n: int) -> int { + return compare_byte_ptrs((^byte)(a), (^byte)(b), n) +} + +/* +Check whether two objects are equal on binary level. + +This procedure checks whether the memory ranges occupied by objects `a` and +`b` are equal. See `compare_byte_ptrs()` for how this comparison is done. +*/ +@(require_results) +simple_equal :: proc "contextless" (a, b: $T) -> bool where intrinsics.type_is_simple_compare(T) { + a, b := a, b + return compare_byte_ptrs((^byte)(&a), (^byte)(&b), size_of(T)) == 0 +} + +/* +Check if the memory range defined by a slice is zero-filled. 
+ +This procedure checks whether every byte, pointed to by the slice, specified +by the parameter `data`, is zero. If all bytes of the slice are zero, this +procedure returns `true`. Otherwise this procedure returns `false`. +*/ @(require_results) check_zero :: proc(data: []byte) -> bool { return check_zero_ptr(raw_data(data), len(data)) } +/* +Check if the memory range defined by a pointer is zero-filled. + +This procedure checks whether each of the `len` bytes, starting at address +`ptr` is zero. If all bytes of this range are zero, this procedure returns +`true`. Otherwise this procedure returns `false`. +*/ @(require_results) check_zero_ptr :: proc(ptr: rawptr, len: int) -> bool { switch { @@ -85,58 +290,99 @@ check_zero_ptr :: proc(ptr: rawptr, len: int) -> bool { case 4: return intrinsics.unaligned_load((^u32)(ptr)) == 0 case 8: return intrinsics.unaligned_load((^u64)(ptr)) == 0 } - start := uintptr(ptr) start_aligned := align_forward_uintptr(start, align_of(uintptr)) end := start + uintptr(len) end_aligned := align_backward_uintptr(end, align_of(uintptr)) - for b in start.. bool where intrinsics.type_is_simple_compare(T) { - a, b := a, b - return compare_byte_ptrs((^byte)(&a), (^byte)(&b), size_of(T)) == 0 -} +/* +Offset a given pointer by a given amount. -@(require_results) -compare_ptrs :: proc "contextless" (a, b: rawptr, n: int) -> int { - return compare_byte_ptrs((^byte)(a), (^byte)(b), n) -} +This procedure offsets the pointer `ptr` to an object of type `T`, by the amount +of bytes specified by `offset*size_of(T)`, and returns the pointer `ptr`. +**Note**: Prefer to use multipointer types, if possible. +*/ ptr_offset :: intrinsics.ptr_offset +/* +Offset a given pointer by a given amount backwards. + +This procedure offsets the pointer `ptr` to an object of type `T`, by the amount +of bytes specified by `offset*size_of(T)` in the negative direction, and +returns the pointer `ptr`.
+*/ ptr_sub :: intrinsics.ptr_sub +/* +Construct a slice from pointer and length. + +This procedure creates a slice, that points to `len` amount of objects located +at an address, specified by `ptr`. +*/ @(require_results) slice_ptr :: proc "contextless" (ptr: ^$T, len: int) -> []T { return ([^]T)(ptr)[:len] } +/* +Construct a byte slice from raw pointer and length. + +This procedure creates a byte slice, that points to `len` amount of bytes +located at an address specified by `data`. +*/ @(require_results) byte_slice :: #force_inline proc "contextless" (data: rawptr, #any_int len: int) -> []byte { return ([^]u8)(data)[:max(len, 0)] } +/* +Create a byte slice from pointer and length. + +This procedure creates a byte slice, pointing to `len` objects, starting from +the address specified by `ptr`. +*/ +@(require_results) +ptr_to_bytes :: proc "contextless" (ptr: ^$T, len := 1) -> []byte { + return transmute([]byte)Raw_Slice{ptr, len*size_of(T)} +} + +/* +Obtain the slice, pointing to the contents of `any`. + +This procedure returns the slice, pointing to the contents of the specified +value of the `any` type. +*/ +@(require_results) +any_to_bytes :: proc "contextless" (val: any) -> []byte { + ti := type_info_of(val.id) + size := ti != nil ? ti.size : 0 + return transmute([]byte)Raw_Slice{val.data, size} +} + +/* +Obtain a byte slice from any slice. + +This procedure returns a slice, that points to the same bytes as the slice, +specified by `slice` and returns the resulting byte slice. +*/ @(require_results) slice_to_bytes :: proc "contextless" (slice: $E/[]$T) -> []byte { s := transmute(Raw_Slice)slice @@ -144,6 +390,15 @@ slice_to_bytes :: proc "contextless" (slice: $E/[]$T) -> []byte { return transmute([]byte)s } +/* +Transmute slice to a different type. + +This procedure performs an operation similar to transmute, returning a slice of +type `T` that points to the same bytes as the slice specified by `slice` +parameter. 
Unlike plain transmute operation, this procedure adjusts the length +of the resulting slice, such that the resulting slice points to the correct +amount of objects to cover the memory region pointed to by `slice`. +*/ @(require_results) slice_data_cast :: proc "contextless" ($T: typeid/[]$A, slice: $S/[]$B) -> T { when size_of(A) == 0 || size_of(B) == 0 { @@ -155,12 +410,25 @@ slice_data_cast :: proc "contextless" ($T: typeid/[]$A, slice: $S/[]$B) -> T { } } +/* +Obtain data and length of a slice. + +This procedure returns the pointer to the start of the memory region pointed to +by slice `slice` and the length of the slice. +*/ @(require_results) slice_to_components :: proc "contextless" (slice: $E/[]$T) -> (data: ^T, len: int) { s := transmute(Raw_Slice)slice return (^T)(s.data), s.len } +/* +Create a dynamic array from slice. + +This procedure creates a dynamic array, using slice `backing` as the backing +buffer for the dynamic array. The resulting dynamic array can not grow beyond +the size of the specified slice. +*/ @(require_results) buffer_from_slice :: proc "contextless" (backing: $T/[]$E) -> [dynamic]E { return transmute([dynamic]E)Raw_Dynamic_Array{ @@ -174,19 +442,12 @@ buffer_from_slice :: proc "contextless" (backing: $T/[]$E) -> [dynamic]E { } } -@(require_results) -ptr_to_bytes :: proc "contextless" (ptr: ^$T, len := 1) -> []byte { - return transmute([]byte)Raw_Slice{ptr, len*size_of(T)} -} - -@(require_results) -any_to_bytes :: proc "contextless" (val: any) -> []byte { - ti := type_info_of(val.id) - size := ti != nil ? ti.size : 0 - return transmute([]byte)Raw_Slice{val.data, size} -} - +/* +Check whether a number is a power of two. +This procedure checks whether a given pointer-sized unsigned integer contains +a power-of-two value. 
+*/ @(require_results) is_power_of_two :: proc "contextless" (x: uintptr) -> bool { if x <= 0 { @@ -195,67 +456,156 @@ is_power_of_two :: proc "contextless" (x: uintptr) -> bool { return (x & (x-1)) == 0 } -@(require_results) -align_forward :: proc(ptr: rawptr, align: uintptr) -> rawptr { - return rawptr(align_forward_uintptr(uintptr(ptr), align)) -} +/* +Align uintptr forward. + +This procedure returns the next address after `ptr`, that is located on the +alignment boundary specified by `align`. If `ptr` is already aligned to `align` +bytes, `ptr` is returned. +The specified alignment must be a power of 2. +*/ @(require_results) align_forward_uintptr :: proc(ptr, align: uintptr) -> uintptr { assert(is_power_of_two(align)) + return (p + align-1) & ~(align-1) +} - p := ptr - modulo := p & (align-1) - if modulo != 0 { - p += align - modulo - } - return p +/* +Align pointer forward. + +This procedure returns the next address after `ptr`, that is located on the +alignment boundary specified by `align`. If `ptr` is already aligned to `align` +bytes, `ptr` is returned. + +The specified alignment must be a power of 2. +*/ +@(require_results) +align_forward :: proc(ptr: rawptr, align: uintptr) -> rawptr { + return rawptr(align_forward_uintptr(uintptr(ptr), align)) } +/* +Align int forward. + +This procedure returns the next address after `ptr`, that is located on the +alignment boundary specified by `align`. If `ptr` is already aligned to `align` +bytes, `ptr` is returned. + +The specified alignment must be a power of 2. +*/ @(require_results) align_forward_int :: proc(ptr, align: int) -> int { return int(align_forward_uintptr(uintptr(ptr), uintptr(align))) } +/* +Align uint forward. + +This procedure returns the next address after `ptr`, that is located on the +alignment boundary specified by `align`. If `ptr` is already aligned to `align` +bytes, `ptr` is returned. + +The specified alignment must be a power of 2. 
+*/ @(require_results) align_forward_uint :: proc(ptr, align: uint) -> uint { return uint(align_forward_uintptr(uintptr(ptr), uintptr(align))) } +/* +Align uintptr backwards. + +This procedure returns the previous address before `ptr`, that is located on the +alignment boundary specified by `align`. If `ptr` is already aligned to `align` +bytes, `ptr` is returned. + +The specified alignment must be a power of 2. +*/ @(require_results) -align_backward :: proc(ptr: rawptr, align: uintptr) -> rawptr { - return rawptr(align_backward_uintptr(uintptr(ptr), align)) +align_backward_uintptr :: proc(ptr, align: uintptr) -> uintptr { + assert(is_power_of_two(align)) + return ptr & ~(align-1) } +/* +Align rawptr backwards. + +This procedure returns the previous address before `ptr`, that is located on the +alignment boundary specified by `align`. If `ptr` is already aligned to `align` +bytes, `ptr` is returned. + +The specified alignment must be a power of 2. +*/ @(require_results) -align_backward_uintptr :: proc(ptr, align: uintptr) -> uintptr { - return align_forward_uintptr(ptr - align + 1, align) +align_backward :: proc(ptr: rawptr, align: uintptr) -> rawptr { + return rawptr(align_backward_uintptr(uintptr(ptr), align)) } +/* +Align int backwards. + +This procedure returns the previous address before `ptr`, that is located on the +alignment boundary specified by `align`. If `ptr` is already aligned to `align` +bytes, `ptr` is returned. + +The specified alignment must be a power of 2. +*/ @(require_results) align_backward_int :: proc(ptr, align: int) -> int { return int(align_backward_uintptr(uintptr(ptr), uintptr(align))) } +/* +Align uint backwards. + +This procedure returns the previous address before `ptr`, that is located on the +alignment boundary specified by `align`. If `ptr` is already aligned to `align` +bytes, `ptr` is returned. + +The specified alignment must be a power of 2. 
+*/ @(require_results) align_backward_uint :: proc(ptr, align: uint) -> uint { return uint(align_backward_uintptr(uintptr(ptr), uintptr(align))) } +/* +Create a context with a given allocator. + +This procedure returns a copy of the current context with the allocator replaced +by the allocator `a`. +*/ @(require_results) context_from_allocator :: proc(a: Allocator) -> type_of(context) { context.allocator = a return context } +/* +Copy the value from a pointer into a value. + +This procedure copies the object of type `T` pointed to by the pointer `ptr` +into a new stack-allocated value and returns that value. +*/ @(require_results) reinterpret_copy :: proc "contextless" ($T: typeid, ptr: rawptr) -> (value: T) { copy(&value, ptr, size_of(T)) return } +/* +Dynamic array with a fixed capacity buffer. + +This type represents dynamic arrays with a fixed-size backing buffer. Upon +allocating memory beyond reaching the maximum capacity, allocations from fixed +byte buffers return `nil` and no error. +*/ Fixed_Byte_Buffer :: distinct [dynamic]byte +/* +Create a fixed byte buffer from a slice. +*/ @(require_results) make_fixed_byte_buffer :: proc "contextless" (backing: []byte) -> Fixed_Byte_Buffer { s := transmute(Raw_Slice)backing @@ -270,12 +620,24 @@ make_fixed_byte_buffer :: proc "contextless" (backing: []byte) -> Fixed_Byte_Buf return transmute(Fixed_Byte_Buffer)d } +/* +General-purpose align formula. + +This procedure is equivalent to `align_forward`, but it does not require the +alignment to be a power of two. +*/ @(require_results) align_formula :: proc "contextless" (size, align: int) -> int { result := size + align-1 return result - result%align } +/* +Calculate the padding after the pointer with a header. + +This procedure returns the next address, following `ptr` and `header_size` +bytes of space that is aligned to a boundary specified by `align`. 
+*/ @(require_results) calc_padding_with_header :: proc "contextless" (ptr: uintptr, align: uintptr, header_size: int) -> int { p, a := ptr, align @@ -287,14 +649,12 @@ calc_padding_with_header :: proc "contextless" (ptr: uintptr, align: uintptr, he needed_space := uintptr(header_size) if padding < needed_space { needed_space -= padding - if needed_space & (a-1) > 0 { padding += align * (1+(needed_space/align)) } else { padding += align * (needed_space/align) } } - return int(padding) } From f8cd13767e360092fb4b7df02508f2884cf82da5 Mon Sep 17 00:00:00 2001 From: flysand7 Date: Sat, 7 Sep 2024 18:08:11 +1100 Subject: [PATCH 20/35] [mem]: Fix the issue with unbranched version of ptr align --- core/mem/mem.odin | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/mem/mem.odin b/core/mem/mem.odin index c17ab43a9d4..2212ee1718b 100644 --- a/core/mem/mem.odin +++ b/core/mem/mem.odin @@ -468,7 +468,7 @@ The specified alignment must be a power of 2. @(require_results) align_forward_uintptr :: proc(ptr, align: uintptr) -> uintptr { assert(is_power_of_two(align)) - return (p + align-1) & ~(align-1) + return (ptr + align-1) & ~(align-1) } /* From 1842cd6297ae60fba46b43758d5ffc655ef2e58c Mon Sep 17 00:00:00 2001 From: flysand7 Date: Sun, 8 Sep 2024 00:09:18 +1100 Subject: [PATCH 21/35] Fix typo Co-authored-by: FourteenBrush <74827262+FourteenBrush@users.noreply.github.com> --- core/mem/mem.odin | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/mem/mem.odin b/core/mem/mem.odin index 2212ee1718b..0554cee23dd 100644 --- a/core/mem/mem.odin +++ b/core/mem/mem.odin @@ -98,7 +98,7 @@ compiler assumes volatile semantics of the memory. */ zero_explicit :: proc "contextless" (data: rawptr, len: int) -> rawptr { // This routine tries to avoid the compiler optimizing away the call, - // so that it is always executed. It is intended to provided + // so that it is always executed. 
It is intended to provide // equivalent semantics to those provided by the C11 Annex K 3.7.4.1 // memset_s call. intrinsics.mem_zero_volatile(data, len) // Use the volatile mem_zero From c719a86312813c1eed8793aafe6f98eb5ed1b3e0 Mon Sep 17 00:00:00 2001 From: flysand7 Date: Sun, 8 Sep 2024 10:58:40 +1100 Subject: [PATCH 22/35] [mem]: Document alloc.odin --- core/mem/alloc.odin | 565 ++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 524 insertions(+), 41 deletions(-) diff --git a/core/mem/alloc.odin b/core/mem/alloc.odin index 558e810e34a..dbf9af9b244 100644 --- a/core/mem/alloc.odin +++ b/core/mem/alloc.odin @@ -2,66 +2,272 @@ package mem import "base:runtime" -// NOTE(bill, 2019-12-31): These are defined in `package runtime` as they are used in the `context`. This is to prevent an import definition cycle. -Allocator_Mode :: runtime.Allocator_Mode +//NOTE(bill, 2019-12-31): These are defined in `package runtime` as they are used in the `context`. This is to prevent an import definition cycle. + /* -Allocator_Mode :: enum byte { - Alloc, - Free, - Free_All, - Resize, - Query_Features, - Alloc_Non_Zeroed, - Resize_Non_Zeroed, -} +A request to allocator procedure. + +This type represents a type of allocation request made to an allocator +procedure. There is one allocator procedure per allocator, and this value is +used to discriminate between different functions of the allocator. + +The type is defined as follows: + + Allocator_Mode :: enum byte { + Alloc, + Alloc_Non_Zeroed, + Free, + Free_All, + Resize, + Resize_Non_Zeroed, + Query_Features, + } + +Depending on which value is used, the allocator procedure will perform different +functions: + +- `Alloc`: Allocates a memory region with a given `size` and `alignment`. +- `Alloc_Non_Zeroed`: Same as `Alloc` without explicit zero-initialization of + the memory region. +- `Free`: Free a memory region located at address `ptr` with a given `size`. +- `Free_All`: Free all memory allocated using this allocator. 
+- `Resize`: Resize a memory region located at address `old_ptr` with size + `old_size` to be `size` bytes in length and have the specified `alignment`, + in case a re-alllocation occurs. +- `Resize_Non_Zeroed`: Same as `Resize`, without explicit zero-initialization. + */ +Allocator_Mode :: runtime.Allocator_Mode -Allocator_Mode_Set :: runtime.Allocator_Mode_Set /* -Allocator_Mode_Set :: distinct bit_set[Allocator_Mode]; +A set of allocator features. + +This type represents values that contain a set of features an allocator has. +Currently the type is defined as follows: + + Allocator_Mode_Set :: distinct bit_set[Allocator_Mode]; */ +Allocator_Mode_Set :: runtime.Allocator_Mode_Set -Allocator_Query_Info :: runtime.Allocator_Query_Info /* -Allocator_Query_Info :: struct { - pointer: rawptr, - size: Maybe(int), - alignment: Maybe(int), -} +Allocator information. + +This type represents information about a given allocator at a specific point +in time. Currently the type is defined as follows: + + Allocator_Query_Info :: struct { + pointer: rawptr, + size: Maybe(int), + alignment: Maybe(int), + } + +- `pointer`: Pointer to a backing buffer. +- `size`: Size of the backing buffer. +- `alignment`: The allocator's alignment. + +If not applicable, any of these fields may be `nil`. */ +Allocator_Query_Info :: runtime.Allocator_Query_Info +/* +An allocation request error. + +This type represents error values the allocators may return upon requests. + + Allocator_Error :: enum byte { + None = 0, + Out_Of_Memory = 1, + Invalid_Pointer = 2, + Invalid_Argument = 3, + Mode_Not_Implemented = 4, + } + +The meaning of the errors is as follows: + +- `None`: No error. +- `Out_Of_Memory`: Either: + 1. The allocator has ran out of the backing buffer, or the requested + allocation size is too large to fit into a backing buffer. + 2. The operating system error during memory allocation. + 3. 
The backing allocator was used to allocate a new backing buffer and the + backing allocator returned Out_Of_Memory. +- `Invalid_Pointer`: The pointer referring to a memory region does not belong + to any of the allocator's backing buffers or does not point to a valid start + of an allocation made in that allocator. +- `Invalid_Argument`: Can occur if one of the arguments makes it impossible to + satisfy a request (e.g. having alignment larger than the backing buffer + of the allocation). +- `Mode_Not_Implemented`: The allocator does not support the specified + operation. For example, an arena does not support freeing individual + allocations. +*/ Allocator_Error :: runtime.Allocator_Error + /* +The allocator procedure. + +This type represents allocation procedures. An allocation procedure is a single +procedure, implementing all allocator functions such as allocating the memory, +freeing the memory, etc. + +Currently the type is defined as follows: + + Allocator_Proc :: #type proc( + allocator_data: rawptr, + mode: Allocator_Mode, + size: int, + alignment: int, + old_memory: rawptr, + old_size: int, + location: Source_Code_Location = #caller_location, + ) -> ([]byte, Allocator_Error); + +The function of this procedure and the meaning of parameters depends on the +value of the `mode` parameter. + +## 1. `.Alloc`, `.Alloc_Non_Zeroed` + +Allocates a memory region of size `size`, aligned on a boundary specified by +`alignment`. + +**Inputs**: +- `allocator_data`: Pointer to the allocator data. +- `mode`: `.Alloc` or `.Alloc_Non_Zeroed`. +- `size`: The desired size of the memory region. +- `alignment`: The desired alignment of the allocation. +- `old_memory`: Unused, should be `nil`. +- `old_size`: Unused, should be 0. + +**Returns**: +1. The memory region, if allocated successfully, or `nil` otherwise. +2.
An error, if allocation failed. + +**Note**: Some allocators may return `nil`, even if no error is returned. +Always check both the error and the allocated buffer. + +Same as `.Alloc`. + +## 2. `Free` + +Frees a memory region located at the address specified by `old_memory`. If the +allocator does not track sizes of allocations, the size should be specified in +the `old_size` parameter. + +**Inputs**: +- `allocator_data`: Pointer to the allocator data. +- `mode`: `.Free`. +- `size`: Unused, should be 0. +- `alignment`: Unused, should be 0. +- `old_memory`: Pointer to the memory region to free. +- `old_size`: The size of the memory region to free. This parameter is optional + if the allocator keeps track of the sizes of allocations. + +**Returns**: +1. `nil` +2. Error, if freeing failed. + +## 3. `Free_All` + +Frees all allocations, associated with the allocator, making it available for +further allocations using the same backing buffers. + +**Inputs**: +- `allocator_data`: Pointer to the allocator data. +- `mode`: `.Free_All`. +- `size`: Unused, should be 0. +- `alignment`: Unused, should be 0. +- `old_memory`: Unused, should be `nil`. +- `old_size`: Unused, should be `0`. + +**Returns**: +1. `nil`. +2. Error, if freeing failed. + +## 4. `Resize`, `Resize_Non_Zeroed` + +Resizes the memory region, of the size `old_size` located at the address +specified by `old_memory` to have the new size `size`. The slice of the new +memory region is returned from the procedure. The allocator may attempt to +keep the new memory region at the same address as the previous allocation, +however no such guarantee is made. Do not assume the new memory region will +be at the same address as the old memory region. + +If `old_memory` pointer is `nil`, this function acts just like `.Alloc` or +`.Alloc_Non_Zeroed`, using `size` and `alignment` to allocate a new memory +region. 
+ +If `size` is `0`, the procedure acts just like `.Free`, freeing the +memory region `old_size` bytes in length, located at the address specified by +`old_memory`. + +**Inputs**: +- `allocator_data`: Pointer to the allocator data. +- `mode`: `.Resize` or `.Resize_Non_Zeroed`. +- `size`: The desired new size of the memory region. +- `alignment`: The alignment of the new memory region, if it is allocated +- `old_memory`: The pointer to the memory region to resize. +- `old_size`: The size of the memory region to resize. If the allocator + keeps track of the sizes of allocations, this parameter is optional. + +**Returns**: +1. The slice of the memory region after resize operation, if successful, + `nil` otherwise. +2. An error, if the resize failed. + +**Note**: Some allocators may return `nil`, even if no error is returned. +Always check both the error and the allocated buffer. */ Allocator_Proc :: runtime.Allocator_Proc + /* -Allocator_Proc :: #type proc(allocator_data: rawptr, mode: Allocator_Mode, - size, alignment: int, - old_memory: rawptr, old_size: int, location: Source_Code_Location = #caller_location) -> ([]byte, Allocator_Error); -*/ +Allocator. +This type represents a generic interface for all allocators. Currently this type +is defined as follows: + + Allocator :: struct { + procedure: Allocator_Proc, + data: rawptr, + } + +- `procedure`: Pointer to the allocation procedure. +- `data`: Pointer to the allocator data. +*/ Allocator :: runtime.Allocator + /* -Allocator :: struct { - procedure: Allocator_Proc, - data: rawptr, -} -*/ +Default alignment. +This value is the default alignment for all platforms that is used, if the +alignment is not specified explicitly. +*/ DEFAULT_ALIGNMENT :: 2*align_of(rawptr) +/* +Default page size. + +This value is the default page size for the current platform.
+*/ DEFAULT_PAGE_SIZE :: 64 * 1024 when ODIN_ARCH == .wasm32 || ODIN_ARCH == .wasm64p32 else 16 * 1024 when ODIN_OS == .Darwin && ODIN_ARCH == .arm64 else 4 * 1024 +/* +Allocate memory. + +This function allocates `size` bytes of memory, aligned to a boundary specified +by `alignment` using the allocator specified by `allocator`. + +**Inputs**: +- `size`: The desired size of the allocated memory region. +- `alignment`: The desired alignment of the allocated memory region. +- `allocator`: The allocator to allocate from. + +**Returns**: +1. Pointer to the allocated memory, or `nil` if allocation failed. +2. Error, if the allocation failed. +*/ @(require_results) alloc :: proc( size: int, @@ -73,6 +279,21 @@ alloc :: proc( return raw_data(data), err } +/* +Allocate memory. + +This function allocates `size` bytes of memory, aligned to a boundary specified +by `alignment` using the allocator specified by `allocator`. + +**Inputs**: +- `size`: The desired size of the allocated memory region. +- `alignment`: The desired alignment of the allocated memory region. +- `allocator`: The allocator to allocate from. + +**Returns**: +1. Slice of the allocated memory region, or `nil` if allocation failed. +2. Error, if the allocation failed. +*/ @(require_results) alloc_bytes :: proc( size: int, @@ -83,6 +304,22 @@ alloc_bytes :: proc( return runtime.mem_alloc(size, alignment, allocator, loc) } +/* +Allocate non-zeroed memory. + +This function allocates `size` bytes of memory, aligned to a boundary specified +by `alignment` using the allocator specified by `allocator`. This procedure +does not explicitly zero-initialize allocated memory region. + +**Inputs**: +- `size`: The desired size of the allocated memory region. +- `alignment`: The desired alignment of the allocated memory region. +- `allocator`: The allocator to allocate from. + +**Returns**: +1. Slice of the allocated memory region, or `nil` if allocation failed. +2. Error, if the allocation failed. 
+*/ @(require_results) alloc_bytes_non_zeroed :: proc( size: int, @@ -93,6 +330,16 @@ alloc_bytes_non_zeroed :: proc( return runtime.mem_alloc_non_zeroed(size, alignment, allocator, loc) } +/* +Free memory. + +This procedure frees memory region located at the address, specified by `ptr`, +allocated from the allocator specified by `allocator`. + +**Inputs**: +- `ptr`: Pointer to the memory region to free. +- `allocator`: The allocator to free to. +*/ free :: proc( ptr: rawptr, allocator := context.allocator, @@ -101,15 +348,42 @@ free :: proc( return runtime.mem_free(ptr, allocator, loc) } +/* +Free a memory region. + +This procedure frees `size` bytes of memory region located at the address, +specified by `ptr`, allocated from the allocator specified by `allocator`. + +**Inputs**: +- `ptr`: Pointer to the memory region to free. +- `size`: The size of the memory region to free. +- `allocator`: The allocator to free to. + +**Returns**: +- The error, if freeing failed. +*/ free_with_size :: proc( ptr: rawptr, - byte_count: int, + size: int, allocator := context.allocator, loc := #caller_location, ) -> Allocator_Error { - return runtime.mem_free_with_size(ptr, byte_count, allocator, loc) + return runtime.mem_free_with_size(ptr, size, allocator, loc) } +/* +Free a memory region. + +This procedure frees memory region, specified by `bytes`, allocated from the +allocator specified by `allocator`. + +**Inputs**: +- `bytes`: The memory region to free. +- `allocator`: The allocator to free to. + +**Returns**: +- The error, if freeing failed. +*/ free_bytes :: proc( bytes: []byte, allocator := context.allocator, @@ -118,10 +392,45 @@ free_bytes :: proc( return runtime.mem_free_bytes(bytes, allocator, loc) } +/* +Free all allocations. + +This procedure frees all allocations made on the allocator specified by +`allocator` to that allocator, making it available for further allocations. 
+*/ free_all :: proc(allocator := context.allocator, loc := #caller_location) -> Allocator_Error { return runtime.mem_free_all(allocator, loc) } +/* +Resize a memory region. + +This procedure resizes a memory region, `old_size` bytes in size, located at +the address specified by `ptr`, such that it has a new size, specified by +`new_size` and and is aligned on a boundary specified by `alignment`. + +If the `ptr` parameter is `nil`, `resize()` acts just like `alloc`, allocating +`new_size` bytes, aligned on a boundary specified by `alignment`. + +If the `new_size` parameter is `0`, `resize()` acts just like `free`, freeing +the memory region `old_size` bytes in length, located at the address specified +by `ptr`. + +**Inputs**: +- `ptr`: Pointer to the memory region to resize. +- `old_size`: Size of the memory region to resize. +- `new_size`: The desired size of the resized memory region. +- `alignment`: The desired alignment of the resized memory region. +- `allocator`: The owner of the memory region to resize. + +**Returns**: +1. The pointer to the resized memory region, if successfull, `nil` otherwise. +2. Error, if resize failed. + +**Note**: The `alignment` parameter is used to preserve the original alignment +of the allocation, if `resize()` needs to relocate the memory region. Do not +use `resize()` to change the alignment of the allocated memory region. +*/ @(require_results) resize :: proc( ptr: rawptr, @@ -135,6 +444,33 @@ resize :: proc( return raw_data(data), err } +/* +Resize a memory region. + +This procedure resizes a memory region, specified by `old_data`, such that it +has a new size, specified by `new_size` and and is aligned on a boundary +specified by `alignment`. + +If the `old_data` parameter is `nil`, `resize()` acts just like `alloc`, +allocating `new_size` bytes, aligned on a boundary specified by `alignment`. + +If the `new_size` parameter is `0`, `resize()` acts just like `free`, freeing +the memory region specified by `old_data`. 
+ +**Inputs**: +- `old_data`: Pointer to the memory region to resize. +- `new_size`: The desired size of the resized memory region. +- `alignment`: The desired alignment of the resized memory region. +- `allocator`: The owner of the memory region to resize. + +**Returns**: +1. The resized memory region, if successfull, `nil` otherwise. +2. Error, if resize failed. + +**Note**: The `alignment` parameter is used to preserve the original alignment +of the allocation, if `resize()` needs to relocate the memory region. Do not +use `resize()` to change the alignment of the allocated memory region. +*/ @(require_results) resize_bytes :: proc( old_data: []byte, @@ -146,6 +482,9 @@ resize_bytes :: proc( return runtime.mem_resize(raw_data(old_data), len(old_data), new_size, alignment, allocator, loc) } +/* +Query allocator features. +*/ @(require_results) query_features :: proc(allocator: Allocator, loc := #caller_location) -> (set: Allocator_Mode_Set) { if allocator.procedure != nil { @@ -155,6 +494,9 @@ query_features :: proc(allocator: Allocator, loc := #caller_location) -> (set: A return nil } +/* +Query allocator information. +*/ @(require_results) query_info :: proc( pointer: rawptr, @@ -168,6 +510,9 @@ query_info :: proc( return } +/* +Free a string. +*/ delete_string :: proc( str: string, allocator := context.allocator, @@ -176,6 +521,9 @@ delete_string :: proc( return runtime.delete_string(str, allocator, loc) } +/* +Free a cstring. +*/ delete_cstring :: proc( str: cstring, allocator := context.allocator, @@ -184,6 +532,9 @@ delete_cstring :: proc( return runtime.delete_cstring(str, allocator, loc) } +/* +Free a dynamic array. +*/ delete_dynamic_array :: proc( array: $T/[dynamic]$E, loc := #caller_location, @@ -191,6 +542,9 @@ delete_dynamic_array :: proc( return runtime.delete_dynamic_array(array, loc) } +/* +Free a slice. 
+*/ delete_slice :: proc( array: $T/[]$E, allocator := context.allocator, @@ -199,6 +553,9 @@ delete_slice :: proc( return runtime.delete_slice(array, allocator, loc) } +/* +Free a map. +*/ delete_map :: proc( m: $T/map[$K]$V, loc := #caller_location, @@ -206,6 +563,9 @@ delete_map :: proc( return runtime.delete_map(m, loc) } +/* +Free. +*/ delete :: proc{ delete_string, delete_cstring, @@ -214,6 +574,13 @@ delete :: proc{ delete_map, } +/* +Allocate a new object. + +This procedure allocates a new object of type `T` using an allocator specified +by `allocator`, and returns a pointer to the allocated object, if allocated +successfully, or `nil` otherwise. +*/ @(require_results) new :: proc( $T: typeid, @@ -223,6 +590,14 @@ new :: proc( return new_aligned(T, align_of(T), allocator, loc) } +/* +Allocate a new object with alignment. + +This procedure allocates a new object of type `T` using an allocator specified +by `allocator`, and returns a pointer, aligned on a boundary specified by +`alignment` to the allocated object, if allocated successfully, or `nil` +otherwise. +*/ @(require_results) new_aligned :: proc( $T: typeid, @@ -233,6 +608,14 @@ new_aligned :: proc( return runtime.new_aligned(T, alignment, allocator, loc) } +/* +Allocate a new object and initialize it with a value. + +This procedure allocates a new object of type `T` using an allocator specified +by `allocator`, and returns a pointer, aligned on a boundary specified by +`alignment` to the allocated object, if allocated successfully, or `nil` +otherwise. The allocated object is initialized with `data`. +*/ @(require_results) new_clone :: proc( data: $T, @@ -242,6 +625,13 @@ new_clone :: proc( return runtime.new_clone(data, allocator, loc) } +/* +Allocate a new slice with alignment. + +This procedure allocates a new slice of type `T` with length `len`, aligned +on a boundary specified by `alignment` from an allocator specified by +`allocator`, and returns the allocated slice. 
+*/ @(require_results) make_aligned :: proc( $T: typeid/[]$E, @@ -253,6 +643,12 @@ make_aligned :: proc( return runtime.make_aligned(T, len, alignment, allocator, loc) } +/* +Allocate a new slice. + +This procedure allocates a new slice of type `T` with length `len`, from an +allocator specified by `allocator`, and returns the allocated slice. +*/ @(require_results) make_slice :: proc( $T: typeid/[]$E, @@ -263,6 +659,12 @@ make_slice :: proc( return runtime.make_slice(T, len, allocator, loc) } +/* +Allocate a dynamic array. + +This procedure creates a dynamic array of type `T`, with `allocator` as its +backing allocator, and initial length and capacity of `0`. +*/ @(require_results) make_dynamic_array :: proc( $T: typeid/[dynamic]$E, @@ -272,6 +674,13 @@ make_dynamic_array :: proc( return runtime.make_dynamic_array(T, allocator, loc) } +/* +Allocate a dynamic array with initial length. + +This procedure creates a dynamic array of type `T`, with `allocator` as its +backing allocator, and initial capacity of `0`, and initial length specified by +`len`. +*/ @(require_results) make_dynamic_array_len :: proc( $T: typeid/[dynamic]$E, @@ -282,6 +691,13 @@ make_dynamic_array_len :: proc( return runtime.make_dynamic_array_len_cap(T, len, len, allocator, loc) } +/* +Allocate a dynamic array with initial length and capacity. + +This procedure creates a dynamic array of type `T`, with `allocator` as its +backing allocator, and initial capacity specified by `cap`, and initial length +specified by `len`. +*/ @(require_results) make_dynamic_array_len_cap :: proc( $T: typeid/[dynamic]$E, @@ -293,6 +709,13 @@ make_dynamic_array_len_cap :: proc( return runtime.make_dynamic_array_len_cap(T, len, cap, allocator, loc) } +/* +Allocate a map. + +This procedure creates a map of type `T` with initial capacity specified by +`cap`, that is using an allocator specified by `allocator` as its backing +allocator. 
+*/ @(require_results) make_map :: proc( $T: typeid/map[$K]$E, @@ -303,6 +726,12 @@ make_map :: proc( return runtime.make_map(T, cap, allocator, loc) } +/* +Allocate a multi pointer. + +This procedure allocates a multipointer of type `T` pointing to `len` elements, +from an allocator specified by `allocator`. +*/ @(require_results) make_multi_pointer :: proc( $T: typeid/[^]$E, @@ -313,6 +742,9 @@ make_multi_pointer :: proc( return runtime.make_multi_pointer(T, len, allocator, loc) } +/* +Allocate. +*/ make :: proc{ make_slice, make_dynamic_array, @@ -322,6 +754,23 @@ make :: proc{ make_multi_pointer, } +/* +Default resize procedure. + +When allocator does not support resize operation, but supports `.Alloc` and +`.Free`, this procedure is used to implement allocator's default behavior on +resize. + +The behavior of the function is as follows: + +- If `new_size` is `0`, the function acts like `free()`, freeing the memory + region of `old_size` bytes located at `old_memory`. +- If `old_memory` is `nil`, the function acts like `alloc()`, allocating + `new_size` bytes of memory aligned on a boundary specified by `alignment`. +- Otherwise, a new memory region of size `new_size` is allocated, then the + data from the old memory region is copied and the old memory region is + freed. +*/ @(require_results) default_resize_align :: proc( old_memory: rawptr, @@ -343,6 +792,27 @@ default_resize_align :: proc( return } +/* +Default resize procedure. + +When allocator does not support resize operation, but supports +`.Alloc_Non_Zeroed` and `.Free`, this procedure is used to implement allocator's +default behavior on +resize. + +Unlike `default_resize_align` no new memory is being explicitly +zero-initialized. + +The behavior of the function is as follows: + +- If `new_size` is `0`, the function acts like `free()`, freeing the memory + region of `old_size` bytes located at `old_memory`. 
+- If `old_memory` is `nil`, the function acts like `alloc()`, allocating + `new_size` bytes of memory aligned on a boundary specified by `alignment`. +- Otherwise, a new memory region of size `new_size` is allocated, then the + data from the old memory region is copied and the old memory region is + freed. +*/ @(require_results) default_resize_bytes_align_non_zeroed :: proc( old_data: []byte, @@ -354,6 +824,23 @@ default_resize_bytes_align_non_zeroed :: proc( return _default_resize_bytes_align(old_data, new_size, alignment, false, allocator, loc) } +/* +Default resize procedure. + +When allocator does not support resize operation, but supports `.Alloc` and +`.Free`, this procedure is used to implement allocator's default behavior on +resize. + +The behavior of the function is as follows: + +- If `new_size` is `0`, the function acts like `free()`, freeing the memory + region specified by `old_data`. +- If `old_data` is `nil`, the function acts like `alloc()`, allocating + `new_size` bytes of memory aligned on a boundary specified by `alignment`. +- Otherwise, a new memory region of size `new_size` is allocated, then the + data from the old memory region is copied and the old memory region is + freed. 
+*/ @(require_results) default_resize_bytes_align :: proc( old_data: []byte, @@ -383,16 +870,13 @@ _default_resize_bytes_align :: #force_inline proc( return alloc_bytes_non_zeroed(new_size, alignment, allocator, loc) } } - if new_size == 0 { err := free_bytes(old_data, allocator, loc) return nil, err } - if new_size == old_size { return old_data, .None } - new_memory : []byte err : Allocator_Error if should_zero { @@ -403,7 +887,6 @@ _default_resize_bytes_align :: #force_inline proc( if new_memory == nil || err != nil { return nil, err } - runtime.copy(new_memory, old_data) free_bytes(old_data, allocator, loc) return new_memory, err From b78d54601021e4c22a0a62aa7a1cfa69405da455 Mon Sep 17 00:00:00 2001 From: flysand7 Date: Sun, 8 Sep 2024 11:02:17 +1100 Subject: [PATCH 23/35] [mem]: Add non_zeroed versions of resize --- core/mem/alloc.odin | 100 +++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 94 insertions(+), 6 deletions(-) diff --git a/core/mem/alloc.odin b/core/mem/alloc.odin index dbf9af9b244..c2e55541cd8 100644 --- a/core/mem/alloc.odin +++ b/core/mem/alloc.odin @@ -409,10 +409,10 @@ This procedure resizes a memory region, `old_size` bytes in size, located at the address specified by `ptr`, such that it has a new size, specified by `new_size` and and is aligned on a boundary specified by `alignment`. -If the `ptr` parameter is `nil`, `resize()` acts just like `alloc`, allocating +If the `ptr` parameter is `nil`, `resize()` acts just like `alloc()`, allocating `new_size` bytes, aligned on a boundary specified by `alignment`. -If the `new_size` parameter is `0`, `resize()` acts just like `free`, freeing +If the `new_size` parameter is `0`, `resize()` acts just like `free()`, freeing the memory region `old_size` bytes in length, located at the address specified by `ptr`. @@ -444,6 +444,51 @@ resize :: proc( return raw_data(data), err } +/* +Resize a memory region without zero-initialization. 
+ +This procedure resizes a memory region, `old_size` bytes in size, located at +the address specified by `ptr`, such that it has a new size, specified by +`new_size` and and is aligned on a boundary specified by `alignment`. + +If the `ptr` parameter is `nil`, `resize()` acts just like `alloc()`, allocating +`new_size` bytes, aligned on a boundary specified by `alignment`. + +If the `new_size` parameter is `0`, `resize()` acts just like `free()`, freeing +the memory region `old_size` bytes in length, located at the address specified +by `ptr`. + +Unlike `resize()`, this procedure does not explicitly zero-initialize any new +memory. + +**Inputs**: +- `ptr`: Pointer to the memory region to resize. +- `old_size`: Size of the memory region to resize. +- `new_size`: The desired size of the resized memory region. +- `alignment`: The desired alignment of the resized memory region. +- `allocator`: The owner of the memory region to resize. + +**Returns**: +1. The pointer to the resized memory region, if successfull, `nil` otherwise. +2. Error, if resize failed. + +**Note**: The `alignment` parameter is used to preserve the original alignment +of the allocation, if `resize()` needs to relocate the memory region. Do not +use `resize()` to change the alignment of the allocated memory region. +*/ +@(require_results) +resize_non_zeroed :: proc( + ptr: rawptr, + old_size: int, + new_size: int, + alignment: int = DEFAULT_ALIGNMENT, + allocator := context.allocator, + loc := #caller_location, +) -> (rawptr, Allocator_Error) { + data, err := runtime.non_zero_mem_resize(ptr, old_size, new_size, alignment, allocator, loc) + return raw_data(data), err +} + /* Resize a memory region. @@ -451,11 +496,12 @@ This procedure resizes a memory region, specified by `old_data`, such that it has a new size, specified by `new_size` and and is aligned on a boundary specified by `alignment`. 
-If the `old_data` parameter is `nil`, `resize()` acts just like `alloc`, -allocating `new_size` bytes, aligned on a boundary specified by `alignment`. +If the `old_data` parameter is `nil`, `resize_bytes()` acts just like +`alloc_bytes()`, allocating `new_size` bytes, aligned on a boundary specified +by `alignment`. -If the `new_size` parameter is `0`, `resize()` acts just like `free`, freeing -the memory region specified by `old_data`. +If the `new_size` parameter is `0`, `resize_bytes()` acts just like +`free_bytes()`, freeing the memory region specified by `old_data`. **Inputs**: - `old_data`: Pointer to the memory region to resize. @@ -482,6 +528,48 @@ resize_bytes :: proc( return runtime.mem_resize(raw_data(old_data), len(old_data), new_size, alignment, allocator, loc) } +/* +Resize a memory region. + +This procedure resizes a memory region, specified by `old_data`, such that it +has a new size, specified by `new_size` and and is aligned on a boundary +specified by `alignment`. + +If the `old_data` parameter is `nil`, `resize_bytes()` acts just like +`alloc_bytes()`, allocating `new_size` bytes, aligned on a boundary specified +by `alignment`. + +If the `new_size` parameter is `0`, `resize_bytes()` acts just like +`free_bytes()`, freeing the memory region specified by `old_data`. + +Unlike `resize_bytes()`, this procedure does not explicitly zero-initialize +any new memory. + +**Inputs**: +- `old_data`: Pointer to the memory region to resize. +- `new_size`: The desired size of the resized memory region. +- `alignment`: The desired alignment of the resized memory region. +- `allocator`: The owner of the memory region to resize. + +**Returns**: +1. The resized memory region, if successfull, `nil` otherwise. +2. Error, if resize failed. + +**Note**: The `alignment` parameter is used to preserve the original alignment +of the allocation, if `resize()` needs to relocate the memory region. 
Do not +use `resize()` to change the alignment of the allocated memory region. +*/ +@(require_results) +resize_bytes_non_zeroed :: proc( + old_data: []byte, + new_size: int, + alignment: int = DEFAULT_ALIGNMENT, + allocator := context.allocator, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { + return runtime.non_zero_mem_resize(raw_data(old_data), len(old_data), new_size, alignment, allocator, loc) +} + /* Query allocator features. */ From 6eb80831b5ec9e43f1b70eb305688e8cff60f0ce Mon Sep 17 00:00:00 2001 From: flysand7 Date: Sun, 8 Sep 2024 11:12:28 +1100 Subject: [PATCH 24/35] [mem]: Panic when allocator is not initialized --- core/mem/allocators.odin | 67 +++++++++++++++++++++++++++------------- 1 file changed, 45 insertions(+), 22 deletions(-) diff --git a/core/mem/allocators.odin b/core/mem/allocators.odin index 34b89fcb857..e7b5faa1660 100644 --- a/core/mem/allocators.odin +++ b/core/mem/allocators.odin @@ -57,14 +57,24 @@ init_arena :: proc(a: ^Arena, data: []byte) { } @(require_results) -arena_alloc :: proc(a: ^Arena, size: int, alignment := DEFAULT_ALIGNMENT) -> (rawptr, Allocator_Error) { - bytes, err := arena_alloc_bytes(a, size, alignment) +arena_alloc :: proc( + a: ^Arena, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location, +) -> (rawptr, Allocator_Error) { + bytes, err := arena_alloc_bytes(a, size, alignment, loc) return raw_data(bytes), err } @(require_results) -arena_alloc_bytes :: proc(a: ^Arena, size: int, alignment := DEFAULT_ALIGNMENT) -> ([]byte, Allocator_Error) { - bytes, err := arena_alloc_bytes_non_zeroed(a, size, alignment) +arena_alloc_bytes :: proc( + a: ^Arena, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { + bytes, err := arena_alloc_bytes_non_zeroed(a, size, alignment, loc) if bytes != nil { zero_slice(bytes) } @@ -72,13 +82,26 @@ arena_alloc_bytes :: proc(a: ^Arena, size: int, alignment := DEFAULT_ALIGNMENT) } @(require_results) 
-arena_alloc_non_zeroed :: proc(a: ^Arena, size: int, alignment := DEFAULT_ALIGNMENT) -> (rawptr, Allocator_Error) { - bytes, err := arena_alloc_bytes_non_zeroed(a, size, alignment) +arena_alloc_non_zeroed :: proc( + a: ^Arena, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location, +) -> (rawptr, Allocator_Error) { + bytes, err := arena_alloc_bytes_non_zeroed(a, size, alignment, loc) return raw_data(bytes), err } @(require_results) -arena_alloc_bytes_non_zeroed :: proc(a: ^Arena, size: int, alignment := DEFAULT_ALIGNMENT) -> ([]byte, Allocator_Error) { +arena_alloc_bytes_non_zeroed :: proc( + a: ^Arena, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location +) -> ([]byte, Allocator_Error) { + if a.data == nil { + panic("Arena is not initialized", loc) + } #no_bounds_check end := &a.data[a.offset] ptr := align_forward(end, uintptr(alignment)) total_size := size + ptr_sub((^byte)(ptr), (^byte)(end)) @@ -101,22 +124,22 @@ arena_allocator_proc :: proc( alignment: int, old_memory: rawptr, old_size: int, - location := #caller_location, + loc := #caller_location, ) -> ([]byte, Allocator_Error) { arena := cast(^Arena)allocator_data switch mode { case .Alloc: - return arena_alloc_bytes(arena, size, alignment) + return arena_alloc_bytes(arena, size, alignment, loc) case .Alloc_Non_Zeroed: - return arena_alloc_bytes_non_zeroed(arena, size, alignment) + return arena_alloc_bytes_non_zeroed(arena, size, alignment, loc) case .Free: return nil, .Mode_Not_Implemented case .Free_All: arena_free_all(arena) case .Resize: - return default_resize_bytes_align(byte_slice(old_memory, old_size), size, alignment, arena_allocator(arena)) + return default_resize_bytes_align(byte_slice(old_memory, old_size), size, alignment, arena_allocator(arena), loc) case .Resize_Non_Zeroed: - return default_resize_bytes_align_non_zeroed(byte_slice(old_memory, old_size), size, alignment, arena_allocator(arena)) + return 
default_resize_bytes_align_non_zeroed(byte_slice(old_memory, old_size), size, alignment, arena_allocator(arena), loc) case .Query_Features: set := (^Allocator_Mode_Set)(old_memory) if set != nil { @@ -311,9 +334,6 @@ scratch_free :: proc(s: ^Scratch, ptr: rawptr, loc := #caller_location) -> Alloc } scratch_free_all :: proc(s: ^Scratch, loc := #caller_location) { - if s.data == nil { - panic("free_all called on an unitialized scratch allocator", loc) - } s.curr_offset = 0 s.prev_allocation = nil for ptr in s.leaked_allocations { @@ -571,9 +591,6 @@ stack_free :: proc( } stack_free_all :: proc(s: ^Stack, loc := #caller_location) { - if s.data == nil { - panic("Stack free all on an uninitialized stack allocator", loc) - } s.prev_offset = 0 s.curr_offset = 0 } @@ -791,7 +808,7 @@ small_stack_alloc_bytes_non_zeroed :: proc( loc := #caller_location, ) -> ([]byte, Allocator_Error) { if s.data == nil { - return nil, .Invalid_Argument + panic("Small stack is not initialized", loc) } alignment := alignment alignment = clamp(alignment, 1, 8*size_of(Stack_Allocation_Header{}.padding)/2) @@ -815,6 +832,9 @@ small_stack_free :: proc( old_memory: rawptr, loc := #caller_location, ) -> Allocator_Error { + if s.data == nil { + panic("Small stack is not initialized", loc) + } if old_memory == nil { return nil } @@ -892,6 +912,9 @@ small_stack_resize_bytes_non_zeroed :: proc( alignment := DEFAULT_ALIGNMENT, loc := #caller_location, ) -> ([]byte, Allocator_Error) { + if s.data == nil { + panic("Small stack is not initialized", loc) + } old_memory := raw_data(old_data) old_size := len(old_data) alignment := alignment @@ -1029,7 +1052,7 @@ dynamic_arena_destroy :: proc(pool: ^Dynamic_Arena) { @(private="file") _dynamic_arena_cycle_new_block :: proc(p: ^Dynamic_Arena, loc := #caller_location) -> (err: Allocator_Error) { if p.block_allocator.procedure == nil { - panic("You must call pool_init on a Pool before using it", loc) + panic("You must call arena_init on a Pool before using it", 
loc) } if p.current_block != nil { append(&p.used_blocks, p.current_block, loc=loc) @@ -1528,9 +1551,9 @@ buddy_allocator_proc :: proc( case .Alloc_Non_Zeroed: return buddy_allocator_alloc_bytes_non_zeroed(b, uint(size)) case .Resize: - return default_resize_bytes_align(byte_slice(old_memory, old_size), size, alignment, buddy_allocator(b)) + return default_resize_bytes_align(byte_slice(old_memory, old_size), size, alignment, buddy_allocator(b), loc) case .Resize_Non_Zeroed: - return default_resize_bytes_align_non_zeroed(byte_slice(old_memory, old_size), size, alignment, buddy_allocator(b)) + return default_resize_bytes_align_non_zeroed(byte_slice(old_memory, old_size), size, alignment, buddy_allocator(b), loc) case .Free: return nil, buddy_allocator_free(b, old_memory) case .Free_All: From f1f5dc614e0451c65d7e036ecb92eb76a7b753bd Mon Sep 17 00:00:00 2001 From: flysand7 Date: Sun, 8 Sep 2024 11:17:27 +1100 Subject: [PATCH 25/35] [mem]: Remove old comments --- core/mem/allocators.odin | 2 -- 1 file changed, 2 deletions(-) diff --git a/core/mem/allocators.odin b/core/mem/allocators.odin index e7b5faa1660..2ee3973161e 100644 --- a/core/mem/allocators.odin +++ b/core/mem/allocators.odin @@ -402,7 +402,6 @@ scratch_resize_bytes_non_zeroed :: proc( } begin := uintptr(raw_data(s.data)) end := begin + uintptr(len(s.data)) - // TODO(flysand): Doesn't handle old_memory == nil old_ptr := uintptr(old_memory) if begin <= old_ptr && old_ptr < end && old_ptr+uintptr(size) < end { s.curr_offset = int(old_ptr-begin)+size @@ -412,7 +411,6 @@ scratch_resize_bytes_non_zeroed :: proc( if err != nil { return data, err } - // TODO(flysand): OOB access on size < old_size. 
runtime.copy(data, byte_slice(old_memory, old_size)) err = scratch_free(s, old_memory, loc) return data, err From 3b30bc305c9da737545c96173b566efb1e10b466 Mon Sep 17 00:00:00 2001 From: flysand7 Date: Sun, 8 Sep 2024 14:13:03 +1100 Subject: [PATCH 26/35] [mem]: Document raw.odin --- core/mem/raw.odin | 74 ++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 60 insertions(+), 14 deletions(-) diff --git a/core/mem/raw.odin b/core/mem/raw.odin index 7fda3229d30..ab1148cea25 100644 --- a/core/mem/raw.odin +++ b/core/mem/raw.odin @@ -3,40 +3,86 @@ package mem import "base:builtin" import "base:runtime" +/* +Mamory layout of the `any` type. +*/ Raw_Any :: runtime.Raw_Any +/* +Mamory layout of the `string` type. +*/ Raw_String :: runtime.Raw_String - +/* +Mamory layout of the `cstring` type. +*/ Raw_Cstring :: runtime.Raw_Cstring - +/* +Mamory layout of `[]T` types. +*/ Raw_Slice :: runtime.Raw_Slice - +/* +Mamory layout of `[dynamic]T` types. +*/ Raw_Dynamic_Array :: runtime.Raw_Dynamic_Array - +/* +Mamory layout of `map[K]V` types. +*/ Raw_Map :: runtime.Raw_Map - +/* +Mamory layout of `#soa []T` types. +*/ Raw_Soa_Pointer :: runtime.Raw_Soa_Pointer - +/* +Mamory layout of the `complex32` type. +*/ Raw_Complex32 :: runtime.Raw_Complex32 - +/* +Mamory layout of the `complex64` type. +*/ Raw_Complex64 :: runtime.Raw_Complex64 - +/* +Mamory layout of the `complex128` type. +*/ Raw_Complex128 :: runtime.Raw_Complex128 - +/* +Mamory layout of the `quaternion64` type. +*/ Raw_Quaternion64 :: runtime.Raw_Quaternion64 - +/* +Mamory layout of the `quaternion128` type. +*/ Raw_Quaternion128 :: runtime.Raw_Quaternion128 - +/* +Mamory layout of the `quaternion256` type. +*/ Raw_Quaternion256 :: runtime.Raw_Quaternion256 - +/* +Mamory layout of the `quaternion64` type. +*/ Raw_Quaternion64_Vector_Scalar :: runtime.Raw_Quaternion64_Vector_Scalar - +/* +Mamory layout of the `quaternion128` type. 
+*/ Raw_Quaternion128_Vector_Scalar :: runtime.Raw_Quaternion128_Vector_Scalar - +/* +Mamory layout of the `quaternion256` type. +*/ Raw_Quaternion256_Vector_Scalar :: runtime.Raw_Quaternion256_Vector_Scalar +/* +Create a value of the any type. + +This procedure creates a value with type `any` that points to an object with +typeid `id` located at an address specified by `data`. +*/ make_any :: proc "contextless" (data: rawptr, id: typeid) -> any { return transmute(any)Raw_Any{data, id} } +/* +Obtain pointer to the data. + +This procedure returns the pointer to the data of a slice, string, or a dynamic +array. +*/ raw_data :: builtin.raw_data From 299accb717ff376bbb063cde7585d582b658d405 Mon Sep 17 00:00:00 2001 From: flysand7 Date: Sun, 8 Sep 2024 14:17:32 +1100 Subject: [PATCH 27/35] [mem]: Put panic allocator after nil allocator, adjust @require_results --- core/mem/allocators.odin | 115 +++++++++++++++++++-------------------- 1 file changed, 57 insertions(+), 58 deletions(-) diff --git a/core/mem/allocators.odin b/core/mem/allocators.odin index 2ee3973161e..649d5466a8c 100644 --- a/core/mem/allocators.odin +++ b/core/mem/allocators.odin @@ -3,6 +3,7 @@ package mem import "base:intrinsics" import "base:runtime" +@(require_results) nil_allocator :: proc() -> Allocator { return Allocator{ procedure = nil_allocator_proc, @@ -21,6 +22,62 @@ nil_allocator_proc :: proc( return nil, nil } + + +@(require_results) +panic_allocator :: proc() -> Allocator { + return Allocator{ + procedure = panic_allocator_proc, + data = nil, + } +} + +panic_allocator_proc :: proc( + allocator_data: rawptr, + mode: Allocator_Mode, + size, alignment: int, + old_memory: rawptr, + old_size: int, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { + switch mode { + case .Alloc: + if size > 0 { + panic("mem: panic allocator, .Alloc called", loc=loc) + } + case .Alloc_Non_Zeroed: + if size > 0 { + panic("mem: panic allocator, .Alloc_Non_Zeroed called", loc=loc) + } + case .Resize: + if 
size > 0 { + panic("mem: panic allocator, .Resize called", loc=loc) + } + case .Resize_Non_Zeroed: + if size > 0 { + panic("mem: panic allocator, .Resize_Non_Zeroed called", loc=loc) + } + case .Free: + if old_memory != nil { + panic("mem: panic allocator, .Free called", loc=loc) + } + case .Free_All: + panic("mem: panic allocator, .Free_All called", loc=loc) + case .Query_Features: + set := (^Allocator_Mode_Set)(old_memory) + if set != nil { + set^ = {.Query_Features} + } + return nil, nil + + case .Query_Info: + panic("mem: panic allocator, .Query_Info called", loc=loc) + } + return nil, nil +} + + + Arena :: struct { data: []byte, offset: int, @@ -300,7 +357,6 @@ scratch_alloc_bytes_non_zeroed :: proc( return ptr, err } -@(require_results) scratch_free :: proc(s: ^Scratch, ptr: rawptr, loc := #caller_location) -> Allocator_Error { if s.data == nil { panic("Free on an uninitialized scratch allocator", loc) @@ -555,7 +611,6 @@ stack_alloc_bytes_non_zeroed :: proc( return byte_slice(rawptr(next_addr), size), nil } -@(require_results) stack_free :: proc( s: ^Stack, old_memory: rawptr, @@ -824,7 +879,6 @@ small_stack_alloc_bytes_non_zeroed :: proc( return byte_slice(rawptr(next_addr), size), nil } -@(require_results) small_stack_free :: proc( s: ^Small_Stack, old_memory: rawptr, @@ -1254,60 +1308,6 @@ dynamic_arena_allocator_proc :: proc( -panic_allocator_proc :: proc( - allocator_data: rawptr, - mode: Allocator_Mode, - size, alignment: int, - old_memory: rawptr, - old_size: int, - loc := #caller_location, -) -> ([]byte, Allocator_Error) { - switch mode { - case .Alloc: - if size > 0 { - panic("mem: panic allocator, .Alloc called", loc=loc) - } - case .Alloc_Non_Zeroed: - if size > 0 { - panic("mem: panic allocator, .Alloc_Non_Zeroed called", loc=loc) - } - case .Resize: - if size > 0 { - panic("mem: panic allocator, .Resize called", loc=loc) - } - case .Resize_Non_Zeroed: - if size > 0 { - panic("mem: panic allocator, .Resize_Non_Zeroed called", loc=loc) - } - case 
.Free: - if old_memory != nil { - panic("mem: panic allocator, .Free called", loc=loc) - } - case .Free_All: - panic("mem: panic allocator, .Free_All called", loc=loc) - case .Query_Features: - set := (^Allocator_Mode_Set)(old_memory) - if set != nil { - set^ = {.Query_Features} - } - return nil, nil - - case .Query_Info: - panic("mem: panic allocator, .Query_Info called", loc=loc) - } - return nil, nil -} - -@(require_results) -panic_allocator :: proc() -> Allocator { - return Allocator{ - procedure = panic_allocator_proc, - data = nil, - } -} - - - Buddy_Block :: struct #align(align_of(uint)) { size: uint, is_free: bool, @@ -1513,7 +1513,6 @@ buddy_allocator_alloc_bytes_non_zeroed :: proc(b: ^Buddy_Allocator, size: uint) return nil, nil } -@(require_results) buddy_allocator_free :: proc(b: ^Buddy_Allocator, ptr: rawptr) -> Allocator_Error { if ptr != nil { if !(b.head <= ptr && ptr <= b.tail) { From 05df34f99c97eefc6957786150e19010cb84c230 Mon Sep 17 00:00:00 2001 From: flysand7 Date: Sun, 8 Sep 2024 18:44:33 +1100 Subject: [PATCH 28/35] [mem]: Start documenting allocators.odin --- core/mem/allocators.odin | 667 ++++++++++++++++++++++++++++++++++----- 1 file changed, 594 insertions(+), 73 deletions(-) diff --git a/core/mem/allocators.odin b/core/mem/allocators.odin index 649d5466a8c..972031a2131 100644 --- a/core/mem/allocators.odin +++ b/core/mem/allocators.odin @@ -3,6 +3,13 @@ package mem import "base:intrinsics" import "base:runtime" +/* +Nil allocator. + +The `nil` allocator returns `nil` on every allocation attempt. This type of +allocator can be used in scenarios where memory doesn't need to be allocated, +but an attempt to allocate memory is not an error. +*/ @(require_results) nil_allocator :: proc() -> Allocator { return Allocator{ @@ -23,7 +30,13 @@ nil_allocator_proc :: proc( } +/* +Panic allocator. +The panic allocator is a type of allocator that panics on any allocation +attempt. 
This type of allocator can be used in scenarios where memory should +not be allocated, and an attempt to allocate memory is an error. +*/ @(require_results) panic_allocator :: proc() -> Allocator { return Allocator{ @@ -77,7 +90,9 @@ panic_allocator_proc :: proc( } - +/* +Arena allocator data. +*/ Arena :: struct { data: []byte, offset: int, @@ -85,11 +100,19 @@ Arena :: struct { temp_count: int, } -Arena_Temp_Memory :: struct { - arena: ^Arena, - prev_offset: int, -} +/* +Arena allocator. +The arena allocator (also known as a linear allocator, bump allocator, +region allocator) is an allocator that uses a single backing buffer for +allocations. + +The buffer is being used contiguously, from start by end. Each subsequent +allocation occupies the next adjacent region of memory in the buffer. Since +arena allocator does not keep track of any metadata associated with the +allocations and their locations, it is impossible to free individual +allocations. +*/ @(require_results) arena_allocator :: proc(arena: ^Arena) -> Allocator { return Allocator{ @@ -98,6 +121,12 @@ arena_allocator :: proc(arena: ^Arena) -> Allocator { } } +/* +Initialize an arena. + +This procedure initializes the arena `a` with memory region `data` as it's +backing buffer. +*/ arena_init :: proc(a: ^Arena, data: []byte) { a.data = data a.offset = 0 @@ -113,6 +142,13 @@ init_arena :: proc(a: ^Arena, data: []byte) { a.temp_count = 0 } +/* +Allocate memory from an arena. + +This procedure allocates `size` bytes of memory aligned on a boundary specified +by `alignment` from an arena `a`. The allocated memory is zero-initialized. +This procedure returns a pointer to the newly allocated memory region. +*/ @(require_results) arena_alloc :: proc( a: ^Arena, @@ -124,6 +160,13 @@ arena_alloc :: proc( return raw_data(bytes), err } +/* +Allocate memory from an arena. + +This procedure allocates `size` bytes of memory aligned on a boundary specified +by `alignment` from an arena `a`. 
The allocated memory is zero-initialized. +This procedure returns a slice of the newly allocated memory region. +*/ @(require_results) arena_alloc_bytes :: proc( a: ^Arena, @@ -138,6 +181,14 @@ arena_alloc_bytes :: proc( return bytes, err } +/* +Allocate non-initialized memory from an arena. + +This procedure allocates `size` bytes of memory aligned on a boundary specified +by `alignment` from an arena `a`. The allocated memory is not explicitly +zero-initialized. This procedure returns a pointer to the newly allocated +memory region. +*/ @(require_results) arena_alloc_non_zeroed :: proc( a: ^Arena, @@ -149,6 +200,14 @@ arena_alloc_non_zeroed :: proc( return raw_data(bytes), err } +/* +Allocate non-initialized memory from an arena. + +This procedure allocates `size` bytes of memory aligned on a boundary specified +by `alignment` from an arena `a`. The allocated memory is not explicitly +zero-initialized. This procedure returns a slice of the newly allocated +memory region. +*/ @(require_results) arena_alloc_bytes_non_zeroed :: proc( a: ^Arena, @@ -170,6 +229,9 @@ arena_alloc_bytes_non_zeroed :: proc( return byte_slice(ptr, size), nil } +/* +Free all memory to an arena. +*/ arena_free_all :: proc(a: ^Arena) { a.offset = 0 } @@ -209,6 +271,28 @@ arena_allocator_proc :: proc( return nil, nil } +/* +Temporary memory region of arena. + +Temporary memory regions of arena act as "savepoints" for arena. When one is +created, the subsequent allocations are done inside the temporary memory +region. When `end_arena_temp_memory` is called, the arena is rolled back, and +all of the memory that was allocated from the arena will be freed. + +Multiple temporary memory regions can exist at the same time for an arena. +*/ +Arena_Temp_Memory :: struct { + arena: ^Arena, + prev_offset: int, +} + +/* +Start a temporary memory region. + +This procedure creates a temporary memory region. 
After a temporary memory +region is created, all allocations are said to be *inside* the temporary memory +region, until `end_arena_temp_memory` is called. +*/ @(require_results) begin_arena_temp_memory :: proc(a: ^Arena) -> Arena_Temp_Memory { tmp: Arena_Temp_Memory @@ -218,6 +302,12 @@ begin_arena_temp_memory :: proc(a: ^Arena) -> Arena_Temp_Memory { return tmp } +/* +End a temporary memory region. + +This procedure ends the temporary memory region for an arena. All of the +allocations *inside* the temporary memory region will be freed to the arena. +*/ end_arena_temp_memory :: proc(tmp: Arena_Temp_Memory) { assert(tmp.arena.offset >= tmp.prev_offset) assert(tmp.arena.temp_count > 0) @@ -225,11 +315,14 @@ end_arena_temp_memory :: proc(tmp: Arena_Temp_Memory) { tmp.arena.temp_count -= 1 } -/* old procedures */ +/* Preserved for compatibility */ Scratch_Allocator :: Scratch scratch_allocator_init :: scratch_init scratch_allocator_destroy :: scratch_destroy +/* +Scratch allocator data. +*/ Scratch :: struct { data: []byte, curr_offset: int, @@ -238,6 +331,23 @@ Scratch :: struct { leaked_allocations: [dynamic][]byte, } +/* +Scratch allocator. + +The scratch allocator works in a similar way to the `Arena` allocator. The +scratch allocator has a backing buffer, that is being allocated in +contiguous regions, from start to end. + +Each subsequent allocation will be the next adjacent region of memory in the +backing buffer. If the allocation doesn't fit into the remaining space of the +backing buffer, this allocation is put at the start of the buffer, and all +previous allocations will become invalidated. If the allocation doesn't fit +into the backing buffer as a whole, it will be allocated using a backing +allocator, and pointer to the allocated memory region will be put into the +`leaked_allocations` array. + +The `leaked_allocations` array is managed by the `context` allocator. 
+*/ @(require_results) scratch_allocator :: proc(allocator: ^Scratch) -> Allocator { return Allocator{ @@ -246,6 +356,9 @@ scratch_allocator :: proc(allocator: ^Scratch) -> Allocator { } } +/* +Initialize scratch allocator. +*/ scratch_init :: proc(s: ^Scratch, size: int, backup_allocator := context.allocator) -> Allocator_Error { s.data = make_aligned([]byte, size, 2*align_of(rawptr), backup_allocator) or_return s.curr_offset = 0 @@ -255,6 +368,9 @@ scratch_init :: proc(s: ^Scratch, size: int, backup_allocator := context.allocat return nil } +/* +Free all data associated with a scratch allocator. +*/ scratch_destroy :: proc(s: ^Scratch) { if s == nil { return @@ -267,6 +383,13 @@ scratch_destroy :: proc(s: ^Scratch) { s^ = {} } +/* +Allocate memory from scratch allocator. + +This procedure allocates `size` bytes of memory aligned on a boundary specified +by `alignment`. The allocated memory region is zero-initialized. This procedure +returns a pointer to the allocated memory region. +*/ @(require_results) scratch_alloc :: proc( s: ^Scratch, @@ -278,6 +401,13 @@ scratch_alloc :: proc( return raw_data(bytes), err } +/* +Allocate memory from scratch allocator. + +This procedure allocates `size` bytes of memory aligned on a boundary specified +by `alignment`. The allocated memory region is zero-initialized. This procedure +returns a slice of the allocated memory region. +*/ @(require_results) scratch_alloc_bytes :: proc( s: ^Scratch, @@ -292,6 +422,13 @@ scratch_alloc_bytes :: proc( return bytes, err } +/* +Allocate memory from scratch allocator. + +This procedure allocates `size` bytes of memory aligned on a boundary specified +by `alignment`. The allocated memory region is not explicitly zero-initialized. +This procedure returns a pointer to the allocated memory region. +*/ @(require_results) scratch_alloc_non_zeroed :: proc( s: ^Scratch, @@ -303,6 +440,13 @@ scratch_alloc_non_zeroed :: proc( return raw_data(bytes), err } +/* +Allocate memory from scratch allocator. 
+ +This procedure allocates `size` bytes of memory aligned on a boundary specified +by `alignment`. The allocated memory region is not explicitly zero-initialized. +This procedure returns a slice of the allocated memory region. +*/ @(require_results) scratch_alloc_bytes_non_zeroed :: proc( s: ^Scratch, @@ -319,44 +463,49 @@ scratch_alloc_bytes_non_zeroed :: proc( } size := size size = align_forward_int(size, alignment) - switch { - case s.curr_offset+size <= len(s.data): - start := uintptr(raw_data(s.data)) - ptr := start + uintptr(s.curr_offset) - ptr = align_forward_uintptr(ptr, uintptr(alignment)) - s.prev_allocation = rawptr(ptr) - offset := int(ptr - start) - s.curr_offset = offset + size - return byte_slice(rawptr(ptr), size), nil - case size <= len(s.data): + if size <= len(s.data) { + offset := uintptr(0) + if s.curr_offset+size <= len(s.data) { + offset = uintptr(s.curr_offset) + } else { + offset = 0 + } start := uintptr(raw_data(s.data)) - ptr := align_forward_uintptr(start, uintptr(alignment)) + ptr := align_forward_uintptr(offset+start, uintptr(alignment)) s.prev_allocation = rawptr(ptr) - offset := int(ptr - start) - s.curr_offset = offset + size + s.curr_offset = int(offset) + size return byte_slice(rawptr(ptr), size), nil - } - a := s.backup_allocator - if a.procedure == nil { - a = context.allocator - s.backup_allocator = a - } - ptr, err := alloc_bytes_non_zeroed(size, alignment, a, loc) - if err != nil { - return ptr, err - } - if s.leaked_allocations == nil { - s.leaked_allocations, err = make([dynamic][]byte, a) - } - append(&s.leaked_allocations, ptr) - if logger := context.logger; logger.lowest_level <= .Warning { - if logger.procedure != nil { - logger.procedure(logger.data, .Warning, "mem.Scratch resorted to backup_allocator" , logger.options, loc) + } else { + a := s.backup_allocator + if a.procedure == nil { + a = context.allocator + s.backup_allocator = a + } + ptr, err := alloc_bytes_non_zeroed(size, alignment, a, loc) + if err != nil { 
+ return ptr, err + } + if s.leaked_allocations == nil { + s.leaked_allocations, err = make([dynamic][]byte, a) } + append(&s.leaked_allocations, ptr) + if logger := context.logger; logger.lowest_level <= .Warning { + if logger.procedure != nil { + logger.procedure(logger.data, .Warning, "mem.Scratch resorted to backup_allocator" , logger.options, loc) + } + } + return ptr, err } - return ptr, err } +/* +Free memory to scratch allocator. + +This procedure frees the memory region allocated at pointer `ptr`. + +If `ptr` is not the latest allocation and is not a leaked allocation, this +operation is a no-op. +*/ scratch_free :: proc(s: ^Scratch, ptr: rawptr, loc := #caller_location) -> Allocator_Error { if s.data == nil { panic("Free on an uninitialized scratch allocator", loc) @@ -389,6 +538,9 @@ scratch_free :: proc(s: ^Scratch, ptr: rawptr, loc := #caller_location) -> Alloc return .Invalid_Pointer } +/* +Free all memory to the scratch allocator. +*/ scratch_free_all :: proc(s: ^Scratch, loc := #caller_location) { s.curr_offset = 0 s.prev_allocation = nil @@ -398,6 +550,22 @@ scratch_free_all :: proc(s: ^Scratch, loc := #caller_location) { clear(&s.leaked_allocations) } +/* +Resize an allocation. + +This procedure resizes a memory region, defined by its location, `old_memory`, +and its size, `old_size` to have a size `size` and alignment `alignment`. The +newly allocated memory, if any is zero-initialized. + +If `old_memory` is `nil`, this procedure acts just like `scratch_alloc()`, +allocating a memory region `size` bytes in size, aligned on a boundary specified +by `alignment`. + +If `size` is 0, this procedure acts just like `scratch_free()`, freeing the +memory region located at an address specified by `old_memory`. + +This procedure returns the pointer to the resized memory region. +*/ @(require_results) scratch_resize :: proc( s: ^Scratch, @@ -411,6 +579,22 @@ scratch_resize :: proc( return raw_data(bytes), err } +/* +Resize an allocation. 
+ +This procedure resizes a memory region, specified by `old_data`, to have a size +`size` and alignment `alignment`. The newly allocated memory, if any is +zero-initialized. + +If `old_memory` is `nil`, this procedure acts just like `scratch_alloc()`, +allocating a memory region `size` bytes in size, aligned on a boundary specified +by `alignment`. + +If `size` is 0, this procedure acts just like `scratch_free()`, freeing the +memory region located at an address specified by `old_memory`. + +This procedure returns the slice of the resized memory region. +*/ @(require_results) scratch_resize_bytes :: proc( s: ^Scratch, @@ -426,6 +610,22 @@ scratch_resize_bytes :: proc( return bytes, err } +/* +Resize an allocation without zero-initialization. + +This procedure resizes a memory region, defined by its location, `old_memory`, +and its size, `old_size` to have a size `size` and alignment `alignment`. The +newly allocated memory, if any is not explicitly zero-initialized. + +If `old_memory` is `nil`, this procedure acts just like `scratch_alloc()`, +allocating a memory region `size` bytes in size, aligned on a boundary specified +by `alignment`. + +If `size` is 0, this procedure acts just like `scratch_free()`, freeing the +memory region located at an address specified by `old_memory`. + +This procedure returns the pointer to the resized memory region. +*/ @(require_results) scratch_resize_non_zeroed :: proc( s: ^Scratch, @@ -439,6 +639,22 @@ scratch_resize_non_zeroed :: proc( return raw_data(bytes), err } +/* +Resize an allocation. + +This procedure resizes a memory region, specified by `old_data`, to have a size +`size` and alignment `alignment`. The newly allocated memory, if any is not +explicitly zero-initialized. + +If `old_memory` is `nil`, this procedure acts just like `scratch_alloc()`, +allocating a memory region `size` bytes in size, aligned on a boundary specified +by `alignment`. 
+ +If `size` is 0, this procedure acts just like `scratch_free()`, freeing the +memory region located at an address specified by `old_memory`. + +This procedure returns the slice of the resized memory region. +*/ @(require_results) scratch_resize_bytes_non_zeroed :: proc( s: ^Scratch, @@ -509,7 +725,9 @@ scratch_allocator_proc :: proc( -// Stack is a stack-like allocator which has a strict memory freeing order +/* +Stack allocator data. +*/ Stack :: struct { data: []byte, prev_offset: int, @@ -517,11 +735,30 @@ Stack :: struct { peak_used: int, } +/* +Header of a stack allocation. +*/ Stack_Allocation_Header :: struct { prev_offset: int, padding: int, } +/* +Stack allocator. + +The stack allocator is an allocator that allocates data in the backing buffer +linearly, from start to end. Each subsequent allocation will get the next +adjacent memory region. + +Unlike arena allocator, the stack allocator saves allocation metadata and has +a strict freeing order. Only the last allocated element can be freed. After the +last allocated element is freed, the next previous allocated element becomes +available for freeing. + +The metadata is stored in the allocation headers, that are located before the +start of each allocated memory region. Each header points to the start of the +previous allocation header. +*/ @(require_results) stack_allocator :: proc(stack: ^Stack) -> Allocator { return Allocator{ @@ -530,6 +767,12 @@ stack_allocator :: proc(stack: ^Stack) -> Allocator { } } +/* +Initialize the stack allocator. + +This procedure initializes the stack allocator with a backing buffer specified +by `data` parameter. +*/ stack_init :: proc(s: ^Stack, data: []byte) { s.data = data s.prev_offset = 0 @@ -545,6 +788,13 @@ init_stack :: proc(s: ^Stack, data: []byte) { s.peak_used = 0 } +/* +Allocate memory from stack. + +This procedure allocates `size` bytes of memory, aligned to the boundary +specified by `alignment`. The allocated memory is zero-initialized. 
This +procedure returns the pointer to the allocated memory. +*/ @(require_results) stack_alloc :: proc( s: ^Stack, @@ -556,6 +806,13 @@ stack_alloc :: proc( return raw_data(bytes), err } +/* +Allocate memory from stack. + +This procedure allocates `size` bytes of memory, aligned to the boundary +specified by `alignment`. The allocated memory is zero-initialized. This +procedure returns the slice of the allocated memory. +*/ @(require_results) stack_alloc_bytes :: proc( s: ^Stack, @@ -570,6 +827,13 @@ stack_alloc_bytes :: proc( return bytes, err } +/* +Allocate memory from stack. + +This procedure allocates `size` bytes of memory, aligned to the boundary +specified by `alignment`. The allocated memory is not explicitly +zero-initialized. This procedure returns the pointer to the allocated memory. +*/ @(require_results) stack_alloc_non_zeroed :: proc( s: ^Stack, @@ -581,6 +845,13 @@ stack_alloc_non_zeroed :: proc( return raw_data(bytes), err } +/* +Allocate memory from stack. + +This procedure allocates `size` bytes of memory, aligned to the boundary +specified by `alignment`. The allocated memory is not explicitly +zero-initialized. This procedure returns the slice of the allocated memory. +*/ @(require_results) stack_alloc_bytes_non_zeroed :: proc( s: ^Stack, @@ -611,6 +882,13 @@ stack_alloc_bytes_non_zeroed :: proc( return byte_slice(rawptr(next_addr), size), nil } +/* +Free memory to the stack. + +This procedure frees the memory region starting at `old_memory` to the stack. +If the freeing does is an out of order freeing, the `.Invalid_Pointer` error +is returned. +*/ stack_free :: proc( s: ^Stack, old_memory: rawptr, @@ -643,12 +921,30 @@ stack_free :: proc( return nil } +/* +Free all allocations to the stack. +*/ stack_free_all :: proc(s: ^Stack, loc := #caller_location) { s.prev_offset = 0 s.curr_offset = 0 } +/* +Resize an allocation. 
+ +This procedure resizes a memory region, defined by its location, `old_memory`, +and its size, `old_size` to have a size `size` and alignment `alignment`. The +newly allocated memory, if any is zero-initialized. + +If `old_memory` is `nil`, this procedure acts just like `stack_alloc()`, +allocating a memory region `size` bytes in size, aligned on a boundary specified +by `alignment`. +If `size` is 0, this procedure acts just like `stack_free()`, freeing the +memory region located at an address specified by `old_memory`. + +This procedure returns the pointer to the resized memory region. +*/ @(require_results) stack_resize :: proc( s: ^Stack, @@ -662,6 +958,22 @@ stack_resize :: proc( return raw_data(bytes), err } +/* +Resize an allocation. + +This procedure resizes a memory region, specified by the `old_data` parameter +to have a size `size` and alignment `alignment`. The newly allocated memory, +if any is zero-initialized. + +If `old_memory` is `nil`, this procedure acts just like `stack_alloc()`, +allocating a memory region `size` bytes in size, aligned on a boundary specified +by `alignment`. + +If `size` is 0, this procedure acts just like `stack_free()`, freeing the +memory region located at an address specified by `old_memory`. + +This procedure returns the slice of the resized memory region. +*/ @(require_results) stack_resize_bytes :: proc( s: ^Stack, @@ -681,6 +993,22 @@ stack_resize_bytes :: proc( return bytes, err } +/* +Resize an allocation without zero-initialization. + +This procedure resizes a memory region, defined by its location, `old_memory`, +and its size, `old_size` to have a size `size` and alignment `alignment`. The +newly allocated memory, if any is not explicitly zero-initialized. + +If `old_memory` is `nil`, this procedure acts just like `stack_alloc()`, +allocating a memory region `size` bytes in size, aligned on a boundary specified +by `alignment`. 
+ +If `size` is 0, this procedure acts just like `stack_free()`, freeing the +memory region located at an address specified by `old_memory`. + +This procedure returns the pointer to the resized memory region. +*/ @(require_results) stack_resize_non_zeroed :: proc( s: ^Stack, @@ -694,6 +1022,22 @@ stack_resize_non_zeroed :: proc( return raw_data(bytes), err } +/* +Resize an allocation without zero-initialization. + +This procedure resizes a memory region, specified by the `old_data` parameter +to have a size `size` and alignment `alignment`. The newly allocated memory, +if any is not explicitly zero-initialized. + +If `old_memory` is `nil`, this procedure acts just like `stack_alloc()`, +allocating a memory region `size` bytes in size, aligned on a boundary specified +by `alignment`. + +If `size` is 0, this procedure acts just like `stack_free()`, freeing the +memory region located at an address specified by `old_memory`. + +This procedure returns the slice of the resized memory region. +*/ @(require_results) stack_resize_bytes_non_zeroed :: proc( s: ^Stack, @@ -784,18 +1128,28 @@ stack_allocator_proc :: proc( } - +/* +Allocation header of the small stack allocator. +*/ Small_Stack_Allocation_Header :: struct { padding: u8, } -// Small_Stack is a stack-like allocator which uses the smallest possible header but at the cost of non-strict memory freeing order +/* +Small stack allocator data. +*/ Small_Stack :: struct { data: []byte, offset: int, peak_used: int, } +/* +Initialize small stack. + +This procedure initializes the small stack allocator with `data` as its backing +buffer. +*/ small_stack_init :: proc(s: ^Small_Stack, data: []byte) { s.data = data s.offset = 0 @@ -809,6 +1163,28 @@ init_small_stack :: proc(s: ^Small_Stack, data: []byte) { s.peak_used = 0 } +/* +Small stack allocator. + +The small stack allocator is just like a stack allocator, with the only +difference being an extremely small header size. 
Unlike the stack allocator, +small stack allows out-of order freeing of memory. + +The memory is allocated in the backing buffer linearly, from start to end. +Each subsequent allocation will get the next adjacent memory region. + +The metadata is stored in the allocation headers, that are located before the +start of each allocated memory region. Each header contains the amount of +padding bytes between that header and end of the previous allocation. + +## Properties + +**Performance characteristics**: TODO + +**Has a backing allocator**: No + +**Saves metadata**: Allocation header before each allocation. +*/ @(require_results) small_stack_allocator :: proc(stack: ^Small_Stack) -> Allocator { return Allocator{ @@ -817,6 +1193,13 @@ small_stack_allocator :: proc(stack: ^Small_Stack) -> Allocator { } } +/* +Allocate memory from small stack. + +This procedure allocates `size` bytes of memory aligned to a boundary specified +by `alignment`. The allocated memory is zero-initialized. This procedure +returns a pointer to the allocated memory region. +*/ @(require_results) small_stack_alloc :: proc( s: ^Small_Stack, @@ -828,6 +1211,13 @@ small_stack_alloc :: proc( return raw_data(bytes), err } +/* +Allocate memory from small stack. + +This procedure allocates `size` bytes of memory aligned to a boundary specified +by `alignment`. The allocated memory is zero-initialized. This procedure +returns a slice of the allocated memory region. +*/ @(require_results) small_stack_alloc_bytes :: proc( s: ^Small_Stack, @@ -842,6 +1232,13 @@ small_stack_alloc_bytes :: proc( return bytes, err } +/* +Allocate memory from small stack. + +This procedure allocates `size` bytes of memory aligned to a boundary specified +by `alignment`. The allocated memory is not explicitly zero-initialized. This +procedure returns a pointer to the allocated memory region. 
+*/ @(require_results) small_stack_alloc_non_zeroed :: proc( s: ^Small_Stack, @@ -853,6 +1250,13 @@ small_stack_alloc_non_zeroed :: proc( return raw_data(bytes), err } +/* +Allocate memory from small stack. + +This procedure allocates `size` bytes of memory aligned to a boundary specified +by `alignment`. The allocated memory is not explicitly zero-initialized. This +procedure returns a slice of the allocated memory region. +*/ @(require_results) small_stack_alloc_bytes_non_zeroed :: proc( s: ^Small_Stack, @@ -879,6 +1283,13 @@ small_stack_alloc_bytes_non_zeroed :: proc( return byte_slice(rawptr(next_addr), size), nil } +/* +Allocate memory from small stack. + +This procedure allocates `size` bytes of memory aligned to a boundary specified +by `alignment`. The allocated memory is not explicitly zero-initialized. This +procedure returns a slice of the allocated memory region. +*/ small_stack_free :: proc( s: ^Small_Stack, old_memory: rawptr, @@ -907,10 +1318,29 @@ small_stack_free :: proc( return nil } +/* +Free all memory to small stack. +*/ small_stack_free_all :: proc(s: ^Small_Stack) { s.offset = 0 } +/* +Resize an allocation. + +This procedure resizes a memory region, defined by its location, `old_memory`, +and its size, `old_size` to have a size `size` and alignment `alignment`. The +newly allocated memory, if any is zero-initialized. + +If `old_memory` is `nil`, this procedure acts just like `small_stack_alloc()`, +allocating a memory region `size` bytes in size, aligned on a boundary specified +by `alignment`. + +If `size` is 0, this procedure acts just like `small_stack_free()`, freeing the +memory region located at an address specified by `old_memory`. + +This procedure returns the pointer to the resized memory region. +*/ @(require_results) small_stack_resize :: proc( s: ^Small_Stack, @@ -924,6 +1354,22 @@ small_stack_resize :: proc( return raw_data(bytes), err } +/* +Resize an allocation. 
+ +This procedure resizes a memory region, specified by the `old_data` parameter +to have a size `size` and alignment `alignment`. The newly allocated memory, +if any is zero-initialized. + +If `old_memory` is `nil`, this procedure acts just like `small_stack_alloc()`, +allocating a memory region `size` bytes in size, aligned on a boundary specified +by `alignment`. + +If `size` is 0, this procedure acts just like `small_stack_free()`, freeing the +memory region located at an address specified by `old_memory`. + +This procedure returns the slice of the resized memory region. +*/ @(require_results) small_stack_resize_bytes :: proc( s: ^Small_Stack, @@ -943,6 +1389,22 @@ small_stack_resize_bytes :: proc( return bytes, err } +/* +Resize an allocation without zero-initialization. + +This procedure resizes a memory region, defined by its location, `old_memory`, +and its size, `old_size` to have a size `size` and alignment `alignment`. The +newly allocated memory, if any is not explicitly zero-initialized. + +If `old_memory` is `nil`, this procedure acts just like `small_stack_alloc()`, +allocating a memory region `size` bytes in size, aligned on a boundary specified +by `alignment`. + +If `size` is 0, this procedure acts just like `small_stack_free()`, freeing the +memory region located at an address specified by `old_memory`. + +This procedure returns the pointer to the resized memory region. +*/ @(require_results) small_stack_resize_non_zeroed :: proc( s: ^Small_Stack, @@ -956,6 +1418,22 @@ small_stack_resize_non_zeroed :: proc( return raw_data(bytes), err } +/* +Resize an allocation without zero-initialization. + +This procedure resizes a memory region, specified by the `old_data` parameter +to have a size `size` and alignment `alignment`. The newly allocated memory, +if any is not explicitly zero-initialized. 
+ +If `old_memory` is `nil`, this procedure acts just like `small_stack_alloc()`, +allocating a memory region `size` bytes in size, aligned on a boundary specified +by `alignment`. + +If `size` is 0, this procedure acts just like `small_stack_free()`, freeing the +memory region located at an address specified by `old_memory`. + +This procedure returns the slice of the resized memory region. +*/ @(require_results) small_stack_resize_bytes_non_zeroed :: proc( s: ^Small_Stack, @@ -1050,11 +1528,19 @@ dynamic_pool_init :: dynamic_arena_init dynamic_pool_allocator :: dynamic_arena_allocator dynamic_pool_destroy :: dynamic_arena_destroy - - +/* +Default block size for dynamic arena. +*/ DYNAMIC_ARENA_BLOCK_SIZE_DEFAULT :: 65536 + +/* +Default out-band size of the dynamic arena. +*/ DYNAMIC_ARENA_OUT_OF_BAND_SIZE_DEFAULT :: 6554 +/* +Dynamic arena allocator data. +*/ Dynamic_Arena :: struct { block_size: int, out_band_size: int, @@ -1068,65 +1554,95 @@ Dynamic_Arena :: struct { block_allocator: Allocator, } +/* +Initialize a dynamic arena. + +This procedure initializes a dynamic arena. The specified `block_allocator` +will be used to allocate arena blocks, and `array_allocator` to allocate +arrays of blocks and out-band blocks. The blocks have the default size of +`block_size` and out-band threshold will be `out_band_size`. All allocations +will be aligned to a boundary specified by `alignment`. 
+*/ dynamic_arena_init :: proc( - pool: ^Dynamic_Arena, + a: ^Dynamic_Arena, block_allocator := context.allocator, array_allocator := context.allocator, block_size := DYNAMIC_ARENA_BLOCK_SIZE_DEFAULT, out_band_size := DYNAMIC_ARENA_OUT_OF_BAND_SIZE_DEFAULT, alignment := DEFAULT_ALIGNMENT, ) { - pool.block_size = block_size - pool.out_band_size = out_band_size - pool.alignment = alignment - pool.block_allocator = block_allocator - pool.out_band_allocations.allocator = array_allocator - pool.unused_blocks.allocator = array_allocator - pool.used_blocks.allocator = array_allocator -} - + a.block_size = block_size + a.out_band_size = out_band_size + a.alignment = alignment + a.block_allocator = block_allocator + a.out_band_allocations.allocator = array_allocator + a.unused_blocks.allocator = array_allocator + a.used_blocks.allocator = array_allocator +} + +/* +Dynamic arena allocator. + +The dynamic arena allocator uses blocks of a specific size, allocated on-demand +using the block allocator. This allocator acts similarly to arena. All +allocations in a block happen contiguously, from start to end. If an allocation +does not fit into the remaining space of the block, and its size is smaller +than the specified out-band size, a new block is allocated using the +`block_allocator` and the allocation is performed from a newly-allocated block. + +If an allocation has bigger size than the specified out-band size, a new block +is allocated such that the allocation fits into this new block. This is referred +to as an *out-band allocation*. The out-band blocks are kept separately from +normal blocks. + +Just like arena, the dynamic arena does not support freeing of individual +objects. 
+*/ @(require_results) -dynamic_arena_allocator :: proc(pool: ^Dynamic_Arena) -> Allocator { +dynamic_arena_allocator :: proc(a: ^Dynamic_Arena) -> Allocator { return Allocator{ procedure = dynamic_arena_allocator_proc, - data = pool, + data = a, } } -dynamic_arena_destroy :: proc(pool: ^Dynamic_Arena) { - dynamic_arena_free_all(pool) - delete(pool.unused_blocks) - delete(pool.used_blocks) - delete(pool.out_band_allocations) - zero(pool, size_of(pool^)) +/* +Destroy a dynamic arena. +*/ +dynamic_arena_destroy :: proc(a: ^Dynamic_Arena) { + dynamic_arena_free_all(a) + delete(a.unused_blocks) + delete(a.used_blocks) + delete(a.out_band_allocations) + zero(a, size_of(a^)) } @(private="file") -_dynamic_arena_cycle_new_block :: proc(p: ^Dynamic_Arena, loc := #caller_location) -> (err: Allocator_Error) { - if p.block_allocator.procedure == nil { +_dynamic_arena_cycle_new_block :: proc(a: ^Dynamic_Arena, loc := #caller_location) -> (err: Allocator_Error) { + if a.block_allocator.procedure == nil { panic("You must call arena_init on a Pool before using it", loc) } - if p.current_block != nil { - append(&p.used_blocks, p.current_block, loc=loc) + if a.current_block != nil { + append(&a.used_blocks, a.current_block, loc=loc) } new_block: rawptr - if len(p.unused_blocks) > 0 { - new_block = pop(&p.unused_blocks) + if len(a.unused_blocks) > 0 { + new_block = pop(&a.unused_blocks) } else { data: []byte - data, err = p.block_allocator.procedure( - p.block_allocator.data, + data, err = a.block_allocator.procedure( + a.block_allocator.data, Allocator_Mode.Alloc, - p.block_size, - p.alignment, + a.block_size, + a.alignment, nil, 0, ) new_block = raw_data(data) } - p.bytes_left = p.block_size - p.current_pos = new_block - p.current_block = new_block + a.bytes_left = a.block_size + a.current_pos = new_block + a.current_block = new_block return } @@ -1435,6 +1951,11 @@ Buddy_Allocator :: struct { alignment: uint, } +/* +Buddy allocator. 
+ +TODO +*/ @(require_results) buddy_allocator :: proc(b: ^Buddy_Allocator) -> Allocator { return Allocator{ From 167ced8ad161e0c8a84fd7b4fc3c94afff27236e Mon Sep 17 00:00:00 2001 From: flysand7 Date: Sun, 8 Sep 2024 18:50:44 +1100 Subject: [PATCH 29/35] [mem]: Don't use named params for dynamic pool in tests --- core/mem/allocators.odin | 16 ++++++++-------- tests/core/mem/test_mem_dynamic_pool.odin | 4 ++-- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/core/mem/allocators.odin b/core/mem/allocators.odin index 972031a2131..f1e45d1a189 100644 --- a/core/mem/allocators.odin +++ b/core/mem/allocators.odin @@ -1564,20 +1564,20 @@ arrays of blocks and out-band blocks. The blocks have the default size of will be aligned to a boundary specified by `alignment`. */ dynamic_arena_init :: proc( - a: ^Dynamic_Arena, + pool: ^Dynamic_Arena, block_allocator := context.allocator, array_allocator := context.allocator, block_size := DYNAMIC_ARENA_BLOCK_SIZE_DEFAULT, out_band_size := DYNAMIC_ARENA_OUT_OF_BAND_SIZE_DEFAULT, alignment := DEFAULT_ALIGNMENT, ) { - a.block_size = block_size - a.out_band_size = out_band_size - a.alignment = alignment - a.block_allocator = block_allocator - a.out_band_allocations.allocator = array_allocator - a.unused_blocks.allocator = array_allocator - a.used_blocks.allocator = array_allocator + pool.block_size = block_size + pool.out_band_size = out_band_size + pool.alignment = alignment + pool.block_allocator = block_allocator + pool.out_band_allocations.allocator = array_allocator + pool.unused_blocks.allocator = array_allocator + pool.used_blocks.allocator = array_allocator } /* diff --git a/tests/core/mem/test_mem_dynamic_pool.odin b/tests/core/mem/test_mem_dynamic_pool.odin index d1086cfe639..fa204d3b1ac 100644 --- a/tests/core/mem/test_mem_dynamic_pool.odin +++ b/tests/core/mem/test_mem_dynamic_pool.odin @@ -6,7 +6,7 @@ import "core:mem" expect_pool_allocation :: proc(t: ^testing.T, expected_used_bytes, num_bytes, alignment: 
int) { pool: mem.Dynamic_Pool - mem.dynamic_pool_init(pool = &pool, alignment = alignment) + mem.dynamic_pool_init(&pool, alignment = alignment) pool_allocator := mem.dynamic_pool_allocator(&pool) element, err := mem.alloc(num_bytes, alignment, pool_allocator) @@ -48,7 +48,7 @@ expect_pool_allocation_out_of_band :: proc(t: ^testing.T, num_bytes, out_band_si testing.expect(t, num_bytes >= out_band_size, "Sanity check failed, your test call is flawed! Make sure that num_bytes >= out_band_size!") pool: mem.Dynamic_Pool - mem.dynamic_pool_init(pool = &pool, out_band_size = out_band_size) + mem.dynamic_pool_init(&pool, out_band_size = out_band_size) pool_allocator := mem.dynamic_pool_allocator(&pool) element, err := mem.alloc(num_bytes, allocator = pool_allocator) From fdd488256896ab40025ebd394735d5a6a30bd8ee Mon Sep 17 00:00:00 2001 From: flysand7 Date: Tue, 10 Sep 2024 19:51:20 +1100 Subject: [PATCH 30/35] [mem]: Adjust docs for alloc --- core/mem/alloc.odin | 166 +++++++++++++++++++++++++++++++++++++++----- 1 file changed, 150 insertions(+), 16 deletions(-) diff --git a/core/mem/alloc.odin b/core/mem/alloc.odin index c2e55541cd8..1ede92837fa 100644 --- a/core/mem/alloc.odin +++ b/core/mem/alloc.odin @@ -35,7 +35,6 @@ functions: `old_size` to be `size` bytes in length and have the specified `alignment`, in case a re-alllocation occurs. - `Resize_Non_Zeroed`: Same as `Resize`, without explicit zero-initialization. - */ Allocator_Mode :: runtime.Allocator_Mode @@ -123,7 +122,11 @@ Currently the type is defined as follows: ) -> ([]byte, Allocator_Error); The function of this procedure and the meaning of parameters depends on the -value of the `mode` parameter. +value of the `mode` parameter. For any operation the following constraints +apply: + +- The `alignment` must be a power of two. +- The `size` must be a positive integer. ## 1. `.Alloc`, `.Alloc_Non_Zeroed` @@ -142,10 +145,11 @@ Allocates a memory region of size `size`, aligned on a boundary specified by 1. 
The memory region, if allocated successfully, or `nil` otherwise. 2. An error, if allocation failed. -**Note**: Some allocators may return `nil`, even if no error is returned. +**Note**: The nil allocator may return `nil`, even if no error is returned. Always check both the error and the allocated buffer. -Same as `.Alloc`. +**Note**: The `.Alloc` mode is required to be implemented for an allocator +and can not return a `.Mode_Not_Implemented` error. ## 2. `Free` @@ -200,6 +204,10 @@ If `new_size` is `nil`, the procedure acts just like `.Free`, freeing the memory region `old_size` bytes in length, located at the address specified by `old_memory`. +If the `old_memory` pointer is not aligned to the boundary specified by +`alignment`, the procedure relocates the buffer such that the reallocated +buffer is aligned to the boundary specified by `alignment`. + **Inputs**: - `allocator_data`: Pointer to the allocator data. - `mode`: `.Resize` or `.Resize_All`. @@ -216,6 +224,9 @@ memory region `old_size` bytes in length, located at the address specified by **Note**: Some allocators may return `nil`, even if no error is returned. Always check both the error and the allocated buffer. + +**Note**: if `old_size` is `0` and `old_memory` is `nil`, this operation is a +no-op, and should not return errors. */ Allocator_Proc :: runtime.Allocator_Proc @@ -259,6 +270,8 @@ Allocate memory. This function allocates `size` bytes of memory, aligned to a boundary specified by `alignment` using the allocator specified by `allocator`. +If the `size` parameter is `0`, the operation is a no-op. + **Inputs**: - `size`: The desired size of the allocated memory region. - `alignment`: The desired alignment of the allocated memory region. @@ -267,6 +280,14 @@ by `alignment` using the allocator specified by `allocator`. **Returns**: 1. Pointer to the allocated memory, or `nil` if allocation failed. 2. Error, if the allocation failed. + +**Errors**: +- `None`: If no error occurred. 
+- `Out_Of_Memory`: Occurs when the allocator runs out of space in any of its + backing buffers, the backing allocator has ran out of space, or an operating + system failure occurred. +- `Invalid_Argument`: If the supplied `size` is negative, alignment is not a + power of two. */ @(require_results) alloc :: proc( @@ -293,6 +314,14 @@ by `alignment` using the allocator specified by `allocator`. **Returns**: 1. Slice of the allocated memory region, or `nil` if allocation failed. 2. Error, if the allocation failed. + +**Errors**: +- `None`: If no error occurred. +- `Out_Of_Memory`: Occurs when the allocator runs out of space in any of its + backing buffers, the backing allocator has ran out of space, or an operating + system failure occurred. +- `Invalid_Argument`: If the supplied `size` is negative, alignment is not a + power of two. */ @(require_results) alloc_bytes :: proc( @@ -319,6 +348,14 @@ does not explicitly zero-initialize allocated memory region. **Returns**: 1. Slice of the allocated memory region, or `nil` if allocation failed. 2. Error, if the allocation failed. + +**Errors**: +- `None`: If no error occurred. +- `Out_Of_Memory`: Occurs when the allocator runs out of space in any of its + backing buffers, the backing allocator has ran out of space, or an operating + system failure occurred. +- `Invalid_Argument`: If the supplied `size` is negative, alignment is not a + power of two. */ @(require_results) alloc_bytes_non_zeroed :: proc( @@ -339,6 +376,16 @@ allocated from the allocator specified by `allocator`. **Inputs**: - `ptr`: Pointer to the memory region to free. - `allocator`: The allocator to free to. + +**Returns**: +- The error, if freeing failed. + +**Errors**: +- `None`: When no error has occurred. +- `Invalid_Pointer`: The specified pointer is not owned by the specified allocator, + or does not point to a valid allocation. +- `Mode_Not_Implemented`: If the specified allocator does not support the `.Free` +mode. 
*/ free :: proc( ptr: rawptr, @@ -354,6 +401,8 @@ Free a memory region. This procedure frees `size` bytes of memory region located at the address, specified by `ptr`, allocated from the allocator specified by `allocator`. +If the `size` parameter is `0`, this call is equivalent to `free()`. + **Inputs**: - `ptr`: Pointer to the memory region to free. - `size`: The size of the memory region to free. @@ -361,6 +410,13 @@ specified by `ptr`, allocated from the allocator specified by `allocator`. **Returns**: - The error, if freeing failed. + +**Errors**: +- `None`: When no error has occurred. +- `Invalid_Pointer`: The specified pointer is not owned by the specified allocator, + or does not point to a valid allocation. +- `Mode_Not_Implemented`: If the specified allocator does not support the `.Free` +mode. */ free_with_size :: proc( ptr: rawptr, @@ -377,12 +433,22 @@ Free a memory region. This procedure frees memory region, specified by `bytes`, allocated from the allocator specified by `allocator`. +If the length of the specified slice is zero, the `.Invalid_Argument` error +is returned. + **Inputs**: - `bytes`: The memory region to free. - `allocator`: The allocator to free to. **Returns**: - The error, if freeing failed. + +**Errors**: +- `None`: When no error has occurred. +- `Invalid_Pointer`: The specified pointer is not owned by the specified allocator, + or does not point to a valid allocation. +- `Mode_Not_Implemented`: If the specified allocator does not support the `.Free` +mode. */ free_bytes :: proc( bytes: []byte, @@ -397,6 +463,14 @@ Free all allocations. This procedure frees all allocations made on the allocator specified by `allocator` to that allocator, making it available for further allocations. + +**Inputs**: +- `allocator`: The allocator to free to. + +**Errors**: +- `None`: When no error has occurred. +- `Mode_Not_Implemented`: If the specified allocator does not support the `.Free` +mode. 
*/ free_all :: proc(allocator := context.allocator, loc := #caller_location) -> Allocator_Error { return runtime.mem_free_all(allocator, loc) @@ -416,6 +490,10 @@ If the `new_size` parameter is `0`, `resize()` acts just like `free()`, freeing the memory region `old_size` bytes in length, located at the address specified by `ptr`. +If the `old_memory` pointer is not aligned to the boundary specified by +`alignment`, the procedure relocates the buffer such that the reallocated +buffer is aligned to the boundary specified by `alignment`. + **Inputs**: - `ptr`: Pointer to the memory region to resize. - `old_size`: Size of the memory region to resize. @@ -427,9 +505,20 @@ by `ptr`. 1. The pointer to the resized memory region, if successfull, `nil` otherwise. 2. Error, if resize failed. -**Note**: The `alignment` parameter is used to preserve the original alignment -of the allocation, if `resize()` needs to relocate the memory region. Do not -use `resize()` to change the alignment of the allocated memory region. +**Errors**: +- `None`: No error. +- `Out_Of_Memory`: When the allocator's backing buffer or it's backing + allocator does not have enough space to fit in an allocation with the new + size, or an operating system failure occurs. +- `Invalid_Pointer`: The pointer referring to a memory region does not belong + to any of the allocators backing buffers or does not point to a valid start + of an allocation made in that allocator. +- `Invalid_Argument`: When `size` is negative, alignment is not a power of two, + or the `old_size` argument is incorrect. +- `Mode_Not_Implemented`: The allocator does not support the `.Realloc` mode. + +**Note**: if `old_size` is `0` and `old_memory` is `nil`, this operation is a +no-op, and should not return errors. 
*/ @(require_results) resize :: proc( @@ -458,6 +547,10 @@ If the `new_size` parameter is `0`, `resize()` acts just like `free()`, freeing the memory region `old_size` bytes in length, located at the address specified by `ptr`. +If the `old_memory` pointer is not aligned to the boundary specified by +`alignment`, the procedure relocates the buffer such that the reallocated +buffer is aligned to the boundary specified by `alignment`. + Unlike `resize()`, this procedure does not explicitly zero-initialize any new memory. @@ -472,9 +565,20 @@ memory. 1. The pointer to the resized memory region, if successfull, `nil` otherwise. 2. Error, if resize failed. -**Note**: The `alignment` parameter is used to preserve the original alignment -of the allocation, if `resize()` needs to relocate the memory region. Do not -use `resize()` to change the alignment of the allocated memory region. +**Errors**: +- `None`: No error. +- `Out_Of_Memory`: When the allocator's backing buffer or it's backing + allocator does not have enough space to fit in an allocation with the new + size, or an operating system failure occurs. +- `Invalid_Pointer`: The pointer referring to a memory region does not belong + to any of the allocators backing buffers or does not point to a valid start + of an allocation made in that allocator. +- `Invalid_Argument`: When `size` is negative, alignment is not a power of two, + or the `old_size` argument is incorrect. +- `Mode_Not_Implemented`: The allocator does not support the `.Realloc` mode. + +**Note**: if `old_size` is `0` and `old_memory` is `nil`, this operation is a +no-op, and should not return errors. */ @(require_results) resize_non_zeroed :: proc( @@ -503,6 +607,10 @@ by `alignment`. If the `new_size` parameter is `0`, `resize_bytes()` acts just like `free_bytes()`, freeing the memory region specified by `old_data`. 
+If the `old_memory` pointer is not aligned to the boundary specified by +`alignment`, the procedure relocates the buffer such that the reallocated +buffer is aligned to the boundary specified by `alignment`. + **Inputs**: - `old_data`: Pointer to the memory region to resize. - `new_size`: The desired size of the resized memory region. @@ -513,9 +621,20 @@ If the `new_size` parameter is `0`, `resize_bytes()` acts just like 1. The resized memory region, if successfull, `nil` otherwise. 2. Error, if resize failed. -**Note**: The `alignment` parameter is used to preserve the original alignment -of the allocation, if `resize()` needs to relocate the memory region. Do not -use `resize()` to change the alignment of the allocated memory region. +**Errors**: +- `None`: No error. +- `Out_Of_Memory`: When the allocator's backing buffer or it's backing + allocator does not have enough space to fit in an allocation with the new + size, or an operating system failure occurs. +- `Invalid_Pointer`: The pointer referring to a memory region does not belong + to any of the allocators backing buffers or does not point to a valid start + of an allocation made in that allocator. +- `Invalid_Argument`: When `size` is negative, alignment is not a power of two, + or the `old_size` argument is incorrect. +- `Mode_Not_Implemented`: The allocator does not support the `.Realloc` mode. + +**Note**: if `old_size` is `0` and `old_memory` is `nil`, this operation is a +no-op, and should not return errors. */ @(require_results) resize_bytes :: proc( @@ -542,6 +661,10 @@ by `alignment`. If the `new_size` parameter is `0`, `resize_bytes()` acts just like `free_bytes()`, freeing the memory region specified by `old_data`. +If the `old_memory` pointer is not aligned to the boundary specified by +`alignment`, the procedure relocates the buffer such that the reallocated +buffer is aligned to the boundary specified by `alignment`. 
+ Unlike `resize_bytes()`, this procedure does not explicitly zero-initialize any new memory. @@ -555,9 +678,20 @@ any new memory. 1. The resized memory region, if successfull, `nil` otherwise. 2. Error, if resize failed. -**Note**: The `alignment` parameter is used to preserve the original alignment -of the allocation, if `resize()` needs to relocate the memory region. Do not -use `resize()` to change the alignment of the allocated memory region. +**Errors**: +- `None`: No error. +- `Out_Of_Memory`: When the allocator's backing buffer or it's backing + allocator does not have enough space to fit in an allocation with the new + size, or an operating system failure occurs. +- `Invalid_Pointer`: The pointer referring to a memory region does not belong + to any of the allocators backing buffers or does not point to a valid start + of an allocation made in that allocator. +- `Invalid_Argument`: When `size` is negative, alignment is not a power of two, + or the `old_size` argument is incorrect. +- `Mode_Not_Implemented`: The allocator does not support the `.Realloc` mode. + +**Note**: if `old_size` is `0` and `old_memory` is `nil`, this operation is a +no-op, and should not return errors. 
*/ @(require_results) resize_bytes_non_zeroed :: proc( From f16ed256eaf90fb0fed1e795f6c62cd356180422 Mon Sep 17 00:00:00 2001 From: flysand7 Date: Wed, 11 Sep 2024 08:00:27 +1100 Subject: [PATCH 31/35] [mem]: Fix handling of default resize to check alignment --- base/runtime/internal.odin | 11 +++++++---- core/mem/alloc.odin | 2 +- core/mem/mem.odin | 11 +++++++++++ 3 files changed, 19 insertions(+), 5 deletions(-) diff --git a/base/runtime/internal.odin b/base/runtime/internal.odin index ff60cf547ae..a0bda9d40f7 100644 --- a/base/runtime/internal.odin +++ b/base/runtime/internal.odin @@ -118,16 +118,15 @@ mem_copy_non_overlapping :: proc "contextless" (dst, src: rawptr, len: int) -> r DEFAULT_ALIGNMENT :: 2*align_of(rawptr) mem_alloc_bytes :: #force_inline proc(size: int, alignment: int = DEFAULT_ALIGNMENT, allocator := context.allocator, loc := #caller_location) -> ([]byte, Allocator_Error) { - if size == 0 { - return nil, nil - } - if allocator.procedure == nil { + assert(is_power_of_two_int(alignment), "Alignment must be a power of two", loc) + if size == 0 || allocator.procedure == nil{ return nil, nil } return allocator.procedure(allocator.data, .Alloc, size, alignment, nil, 0, loc) } mem_alloc :: #force_inline proc(size: int, alignment: int = DEFAULT_ALIGNMENT, allocator := context.allocator, loc := #caller_location) -> ([]byte, Allocator_Error) { + assert(is_power_of_two_int(alignment), "Alignment must be a power of two", loc) if size == 0 || allocator.procedure == nil { return nil, nil } @@ -135,6 +134,7 @@ mem_alloc :: #force_inline proc(size: int, alignment: int = DEFAULT_ALIGNMENT, a } mem_alloc_non_zeroed :: #force_inline proc(size: int, alignment: int = DEFAULT_ALIGNMENT, allocator := context.allocator, loc := #caller_location) -> ([]byte, Allocator_Error) { + assert(is_power_of_two_int(alignment), "Alignment must be a power of two", loc) if size == 0 || allocator.procedure == nil { return nil, nil } @@ -174,6 +174,7 @@ mem_free_all :: #force_inline 
proc(allocator := context.allocator, loc := #calle } _mem_resize :: #force_inline proc(ptr: rawptr, old_size, new_size: int, alignment: int = DEFAULT_ALIGNMENT, allocator := context.allocator, should_zero: bool, loc := #caller_location) -> (data: []byte, err: Allocator_Error) { + assert(is_power_of_two_int(alignment), "Alignment must be a power of two", loc) if allocator.procedure == nil { return nil, nil } @@ -215,9 +216,11 @@ _mem_resize :: #force_inline proc(ptr: rawptr, old_size, new_size: int, alignmen } mem_resize :: proc(ptr: rawptr, old_size, new_size: int, alignment: int = DEFAULT_ALIGNMENT, allocator := context.allocator, loc := #caller_location) -> (data: []byte, err: Allocator_Error) { + assert(is_power_of_two_int(alignment), "Alignment must be a power of two", loc) return _mem_resize(ptr, old_size, new_size, alignment, allocator, true, loc) } non_zero_mem_resize :: proc(ptr: rawptr, old_size, new_size: int, alignment: int = DEFAULT_ALIGNMENT, allocator := context.allocator, loc := #caller_location) -> (data: []byte, err: Allocator_Error) { + assert(is_power_of_two_int(alignment), "Alignment must be a power of two", loc) return _mem_resize(ptr, old_size, new_size, alignment, allocator, false, loc) } diff --git a/core/mem/alloc.odin b/core/mem/alloc.odin index 1ede92837fa..5f65e9ebcf6 100644 --- a/core/mem/alloc.odin +++ b/core/mem/alloc.odin @@ -1096,7 +1096,7 @@ _default_resize_bytes_align :: #force_inline proc( err := free_bytes(old_data, allocator, loc) return nil, err } - if new_size == old_size { + if new_size == old_size && is_aligned(old_memory, alignment) { return old_data, .None } new_memory : []byte diff --git a/core/mem/mem.odin b/core/mem/mem.odin index 0554cee23dd..b57b18ffcb0 100644 --- a/core/mem/mem.odin +++ b/core/mem/mem.odin @@ -456,6 +456,17 @@ is_power_of_two :: proc "contextless" (x: uintptr) -> bool { return (x & (x-1)) == 0 } +/* +Check if a pointer is aligned. 
+ +This procedure checks whether a pointer `x` is aligned to a boundary specified +by `align`, and returns `true` if the pointer is aligned, and false otherwise. +*/ +is_aligned :: proc "contextless" (x: rawptr, align: int) -> bool { + p := uintptr(x) + return (p & (1< Date: Sat, 14 Sep 2024 10:03:04 +1100 Subject: [PATCH 32/35] [mem]: Adjust the docs on the buddy allocator --- core/mem/allocators.odin | 205 ++++++++++++++++++++++++++++++++++++--- core/mem/doc.odin | 5 + 2 files changed, 197 insertions(+), 13 deletions(-) diff --git a/core/mem/allocators.odin b/core/mem/allocators.odin index f1e45d1a189..4b5efbacb82 100644 --- a/core/mem/allocators.odin +++ b/core/mem/allocators.odin @@ -112,6 +112,12 @@ allocation occupies the next adjacent region of memory in the buffer. Since arena allocator does not keep track of any metadata associated with the allocations and their locations, it is impossible to free individual allocations. + +The arena allocator can be used for temporary allocations in frame-based memory +management. Games are one example of such applications. A global arena can be +used for any temporary memory allocations, and at the end of each frame all +temporary allocations are freed. Since no temporary object is going to live +longer than a frame, no lifetimes are violated. */ @(require_results) arena_allocator :: proc(arena: ^Arena) -> Allocator { @@ -423,7 +429,7 @@ scratch_alloc_bytes :: proc( } /* -Allocate memory from scratch allocator. +Allocate non-initialized memory from scratch allocator. This procedure allocates `size` bytes of memory aligned on a boundary specified by `alignment`. The allocated memory region is not explicitly zero-initialized. @@ -441,7 +447,7 @@ scratch_alloc_non_zeroed :: proc( } /* -Allocate memory from scratch allocator. +Allocate non-initialized memory from scratch allocator. This procedure allocates `size` bytes of memory aligned on a boundary specified by `alignment`. 
The allocated memory region is not explicitly zero-initialized. @@ -499,7 +505,7 @@ scratch_alloc_bytes_non_zeroed :: proc( } /* -Free memory to scratch allocator. +Free memory to the scratch allocator. This procedure frees the memory region allocated at pointer `ptr`. @@ -1176,14 +1182,6 @@ Each subsequent allocation will get the next adjacent memory region. The metadata is stored in the allocation headers, that are located before the start of each allocated memory region. Each header contains the amount of padding bytes between that header and end of the previous allocation. - -## Properties - -**Performance characteristics**: TODO - -**Has a backing allocator**: No - -**Saves metadata**: Allocation header before each allocation. */ @(require_results) small_stack_allocator :: proc(stack: ^Small_Stack) -> Allocator { @@ -1608,6 +1606,9 @@ dynamic_arena_allocator :: proc(a: ^Dynamic_Arena) -> Allocator { /* Destroy a dynamic arena. + +This procedure frees all allocations, made on a dynamic arena, including the +unused blocks, as well as the arrays for storing blocks. */ dynamic_arena_destroy :: proc(a: ^Dynamic_Arena) { dynamic_arena_free_all(a) @@ -1646,12 +1647,28 @@ _dynamic_arena_cycle_new_block :: proc(a: ^Dynamic_Arena, loc := #caller_locatio return } +/* +Allocate memory from a dynamic arena. + +This procedure allocates `size` bytes of memory aligned on a boundary specified +by `alignment` from a dynamic arena `a`. The allocated memory is +zero-initialized. This procedure returns a pointer to the newly allocated memory +region. +*/ @(private, require_results) dynamic_arena_alloc :: proc(a: ^Dynamic_Arena, size: int, loc := #caller_location) -> (rawptr, Allocator_Error) { data, err := dynamic_arena_alloc_bytes(a, size, loc) return raw_data(data), err } +/* +Allocate memory from a dynamic arena. + +This procedure allocates `size` bytes of memory aligned on a boundary specified +by `alignment` from a dynamic arena `a`. The allocated memory is +zero-initialized. 
This procedure returns a slice of the newly allocated memory +region. +*/ @(require_results) dynamic_arena_alloc_bytes :: proc(a: ^Dynamic_Arena, size: int, loc := #caller_location) -> ([]byte, Allocator_Error) { bytes, err := dynamic_arena_alloc_bytes_non_zeroed(a, size, loc) @@ -1661,12 +1678,28 @@ dynamic_arena_alloc_bytes :: proc(a: ^Dynamic_Arena, size: int, loc := #caller_l return bytes, err } +/* +Allocate non-initialized memory from a dynamic arena. + +This procedure allocates `size` bytes of memory aligned on a boundary specified +by `alignment` from a dynamic arena `a`. The allocated memory is not explicitly +zero-initialized. This procedure returns a pointer to the newly allocated +memory region. +*/ @(require_results) dynamic_arena_alloc_non_zeroed :: proc(a: ^Dynamic_Arena, size: int, loc := #caller_location) -> (rawptr, Allocator_Error) { data, err := dynamic_arena_alloc_bytes_non_zeroed(a, size, loc) return raw_data(data), err } +/* +Allocate non-initialized memory from a dynamic arena. + +This procedure allocates `size` bytes of memory aligned on a boundary specified +by `alignment` from a dynamic arena `a`. The allocated memory is not explicitly +zero-initialized. This procedure returns a slice of the newly allocated +memory region. +*/ @(require_results) dynamic_arena_alloc_bytes_non_zeroed :: proc(a: ^Dynamic_Arena, size: int, loc := #caller_location) -> ([]byte, Allocator_Error) { n := align_formula(size, a.alignment) @@ -1696,6 +1729,12 @@ dynamic_arena_alloc_bytes_non_zeroed :: proc(a: ^Dynamic_Arena, size: int, loc : return ([^]byte)(memory)[:size], nil } +/* +Reset the dynamic arena. + +This procedure frees all the allocations, owned by the dynamic arena, excluding +the unused blocks. 
+*/ dynamic_arena_reset :: proc(a: ^Dynamic_Arena, loc := #caller_location) { if a.current_block != nil { append(&a.unused_blocks, a.current_block, loc=loc) @@ -1712,6 +1751,12 @@ dynamic_arena_reset :: proc(a: ^Dynamic_Arena, loc := #caller_location) { a.bytes_left = 0 // Make new allocations call `_dynamic_arena_cycle_new_block` again. } +/* +Free all memory from a dynamic arena. + +This procedure frees all the allocations, owned by the dynamic arena, including +the unused blocks. +*/ dynamic_arena_free_all :: proc(a: ^Dynamic_Arena, loc := #caller_location) { dynamic_arena_reset(a) for block in a.unused_blocks { @@ -1720,6 +1765,22 @@ dynamic_arena_free_all :: proc(a: ^Dynamic_Arena, loc := #caller_location) { clear(&a.unused_blocks) } +/* +Resize an allocation. + +This procedure resizes a memory region, defined by its location, `old_memory`, +and its size, `old_size` to have a size `size` and alignment `alignment`. The +newly allocated memory, if any is zero-initialized. + +If `old_memory` is `nil`, this procedure acts just like `dynamic_arena_alloc()`, +allocating a memory region `size` bytes in size, aligned on a boundary specified +by `alignment`. + +If `size` is 0, this procedure acts just like `dynamic_arena_free()`, freeing +the memory region located at an address specified by `old_memory`. + +This procedure returns the pointer to the resized memory region. +*/ @(require_results) dynamic_arena_resize :: proc( a: ^Dynamic_Arena, @@ -1732,6 +1793,22 @@ dynamic_arena_resize :: proc( return raw_data(bytes), err } +/* +Resize an allocation. + +This procedure resizes a memory region, specified by `old_data`, to have a size +`size` and alignment `alignment`. The newly allocated memory, if any is +zero-initialized. + +If `old_memory` is `nil`, this procedure acts just like `dynamic_arena_alloc()`, +allocating a memory region `size` bytes in size, aligned on a boundary specified +by `alignment`. 
+ +If `size` is 0, this procedure acts just like `dynamic_arena_free()`, freeing the +memory region located at an address specified by `old_memory`. + +This procedure returns the slice of the resized memory region. +*/ @(require_results) dynamic_arena_resize_bytes :: proc( a: ^Dynamic_Arena, @@ -1750,6 +1827,22 @@ dynamic_arena_resize_bytes :: proc( return bytes, err } +/* +Resize an allocation without zero-initialization. + +This procedure resizes a memory region, defined by its location, `old_memory`, +and its size, `old_size` to have a size `size` and alignment `alignment`. The +newly allocated memory, if any is not explicitly zero-initialized. + +If `old_memory` is `nil`, this procedure acts just like `dynamic_arena_alloc()`, +allocating a memory region `size` bytes in size, aligned on a boundary specified +by `alignment`. + +If `size` is 0, this procedure acts just like `dynamic_arena_free()`, freeing the +memory region located at an address specified by `old_memory`. + +This procedure returns the pointer to the resized memory region. +*/ @(require_results) dynamic_arena_resize_non_zeroed :: proc( a: ^Dynamic_Arena, @@ -1762,6 +1855,22 @@ dynamic_arena_resize_non_zeroed :: proc( return raw_data(bytes), err } +/* +Resize an allocation. + +This procedure resizes a memory region, specified by `old_data`, to have a size +`size` and alignment `alignment`. The newly allocated memory, if any is not +explicitly zero-initialized. + +If `old_memory` is `nil`, this procedure acts just like `dynamic_arena_alloc()`, +allocating a memory region `size` bytes in size, aligned on a boundary specified +by `alignment`. + +If `size` is 0, this procedure acts just like `dynamic_arena_free()`, freeing +the memory region located at an address specified by `old_memory`. + +This procedure returns the slice of the resized memory region. 
+*/ @(require_results) dynamic_arena_resize_bytes_non_zeroed :: proc( a: ^Dynamic_Arena, @@ -1823,17 +1932,25 @@ dynamic_arena_allocator_proc :: proc( } - +/* +Header of the buddy block. +*/ Buddy_Block :: struct #align(align_of(uint)) { size: uint, is_free: bool, } +/* +Obtain the next buddy block. +*/ @(require_results) buddy_block_next :: proc(block: ^Buddy_Block) -> ^Buddy_Block { return (^Buddy_Block)(([^]byte)(block)[block.size:]) } +/* +Split the block into two, by truncating the given block to a given size. +*/ @(require_results) buddy_block_split :: proc(block: ^Buddy_Block, size: uint) -> ^Buddy_Block { block := block @@ -1854,6 +1971,9 @@ buddy_block_split :: proc(block: ^Buddy_Block, size: uint) -> ^Buddy_Block { return nil } +/* +Coalesce contiguous blocks in a range of blocks into one. +*/ buddy_block_coalescence :: proc(head, tail: ^Buddy_Block) { for { // Keep looping until there are no more buddies to coalesce @@ -1887,6 +2007,9 @@ buddy_block_coalescence :: proc(head, tail: ^Buddy_Block) { } } +/* +Find the best block for storing a given size in a range of blocks. +*/ @(require_results) buddy_block_find_best :: proc(head, tail: ^Buddy_Block, size: uint) -> ^Buddy_Block { assert(size != 0) @@ -1945,6 +2068,9 @@ buddy_block_find_best :: proc(head, tail: ^Buddy_Block, size: uint) -> ^Buddy_Bl return nil } +/* +The buddy allocator data. +*/ Buddy_Allocator :: struct { head: ^Buddy_Block, tail: ^Buddy_Block, @@ -1954,7 +2080,12 @@ Buddy_Allocator :: struct { /* Buddy allocator. -TODO +The buddy allocator is a type of allocator that splits the backing buffer into +multiple regions called buddy blocks. Initially, the allocator only has one +block with the size of the backing buffer. Upon each allocation, the allocator +finds the smallest block that can fit the size of requested memory region, and +splits the block according to the allocation size. If no block can be found, +the contiguous free blocks are coalesced and the search is performed again. 
*/ @(require_results) buddy_allocator :: proc(b: ^Buddy_Allocator) -> Allocator { @@ -1964,6 +2095,12 @@ buddy_allocator :: proc(b: ^Buddy_Allocator) -> Allocator { } } +/* +Initialize the buddy allocator. + +This procedure initializes the buddy allocator `b` with a backing buffer `data` +and block alignment specified by `alignment`. +*/ buddy_allocator_init :: proc(b: ^Buddy_Allocator, data: []byte, alignment: uint, loc := #caller_location) { assert(data != nil) assert(is_power_of_two(uintptr(len(data))), "Size of the backing buffer must be power of two", loc) @@ -1981,6 +2118,9 @@ buddy_allocator_init :: proc(b: ^Buddy_Allocator, data: []byte, alignment: uint, b.alignment = alignment } +/* +Get required block size to fit in the allocation as well as the alignment padding. +*/ @(require_results) buddy_block_size_required :: proc(b: ^Buddy_Allocator, size: uint) -> uint { size := size @@ -1993,12 +2133,26 @@ buddy_block_size_required :: proc(b: ^Buddy_Allocator, size: uint) -> uint { return actual_size } +/* +Allocate memory from a buddy allocator. + +This procedure allocates `size` bytes of memory aligned on a boundary specified +by `alignment`. The allocated memory region is zero-initialized. This procedure +returns a pointer to the allocated memory region. +*/ @(require_results) buddy_allocator_alloc :: proc(b: ^Buddy_Allocator, size: uint) -> (rawptr, Allocator_Error) { bytes, err := buddy_allocator_alloc_bytes(b, size) return raw_data(bytes), err } +/* +Allocate memory from a buddy allocator. + +This procedure allocates `size` bytes of memory aligned on a boundary specified +by `alignment`. The allocated memory region is zero-initialized. This procedure +returns a slice of the allocated memory region. 
+*/ @(require_results) buddy_allocator_alloc_bytes :: proc(b: ^Buddy_Allocator, size: uint) -> ([]byte, Allocator_Error) { bytes, err := buddy_allocator_alloc_bytes_non_zeroed(b, size) @@ -2008,12 +2162,26 @@ buddy_allocator_alloc_bytes :: proc(b: ^Buddy_Allocator, size: uint) -> ([]byte, return bytes, err } +/* +Allocate non-initialized memory from a buddy allocator. + +This procedure allocates `size` bytes of memory aligned on a boundary specified +by `alignment`. The allocated memory region is not explicitly zero-initialized. +This procedure returns a pointer to the allocated memory region. +*/ @(require_results) buddy_allocator_alloc_non_zeroed :: proc(b: ^Buddy_Allocator, size: uint) -> (rawptr, Allocator_Error) { bytes, err := buddy_allocator_alloc_bytes_non_zeroed(b, size) return raw_data(bytes), err } +/* +Allocate non-initialized memory from a buddy allocator. + +This procedure allocates `size` bytes of memory aligned on a boundary specified +by `alignment`. The allocated memory region is not explicitly zero-initialized. +This procedure returns a slice of the allocated memory region. +*/ @(require_results) buddy_allocator_alloc_bytes_non_zeroed :: proc(b: ^Buddy_Allocator, size: uint) -> ([]byte, Allocator_Error) { if size != 0 { @@ -2034,6 +2202,14 @@ buddy_allocator_alloc_bytes_non_zeroed :: proc(b: ^Buddy_Allocator, size: uint) return nil, nil } +/* +Free memory to the buddy allocator. + +This procedure frees the memory region allocated at pointer `ptr`. + +If `ptr` is not the latest allocation and is not a leaked allocation, this +operation is a no-op. +*/ buddy_allocator_free :: proc(b: ^Buddy_Allocator, ptr: rawptr) -> Allocator_Error { if ptr != nil { if !(b.head <= ptr && ptr <= b.tail) { @@ -2046,6 +2222,9 @@ buddy_allocator_free :: proc(b: ^Buddy_Allocator, ptr: rawptr) -> Allocator_Erro return nil } +/* +Free all memory to the buddy allocator. 
+*/ buddy_allocator_free_all :: proc(b: ^Buddy_Allocator) { alignment := b.alignment head := ([^]byte)(b.head) diff --git a/core/mem/doc.odin b/core/mem/doc.odin index 5e8bcce6a8c..98755d79770 100644 --- a/core/mem/doc.odin +++ b/core/mem/doc.odin @@ -48,6 +48,11 @@ Operations such as `new`, `free` and `delete` by default will use happens all called procedures will inherit the new context and use the same allocator. +We will define one concept to simplify the description of some allocator-related +procedures, which is ownership. If the memory was allocated via a specific +allocator, that allocator is said to be the *owner* of that memory region. To +note, unlike Rust, in Odin the memory ownership model is not strict. + ## Alignment An address is said to be *aligned to `N` bytes*, if the addresses's numeric From 3ed2ab6e2c18081d80961168a57155e6f31ac573 Mon Sep 17 00:00:00 2001 From: flysand7 Date: Sat, 14 Sep 2024 10:18:51 +1100 Subject: [PATCH 33/35] [mem]: Adjust the docs for calc_padding_with_header --- core/mem/alloc.odin | 3 +-- core/mem/mem.odin | 24 ++++++++++++++++++++---- 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/core/mem/alloc.odin b/core/mem/alloc.odin index 5f65e9ebcf6..fac58daafef 100644 --- a/core/mem/alloc.odin +++ b/core/mem/alloc.odin @@ -1019,8 +1019,7 @@ Default resize procedure. When allocator does not support resize operation, but supports `.Alloc_Non_Zeroed` and `.Free`, this procedure is used to implement allocator's -default behavior on -resize. +default behavior on resize. Unlike `default_resize_align` no new memory is being explicitly zero-initialized. diff --git a/core/mem/mem.odin b/core/mem/mem.odin index b57b18ffcb0..67ed56c39a0 100644 --- a/core/mem/mem.odin +++ b/core/mem/mem.odin @@ -644,10 +644,26 @@ align_formula :: proc "contextless" (size, align: int) -> int { } /* -Calculate the padding after the pointer with a header. 
- -This procedure returns the next address, following `ptr` and `header_size` -bytes of space that is aligned to a boundary specified by `align`. +Calculate the padding for header preceding aligned data. + +This procedure returns the padding, following the specified pointer `ptr` that +will be able to fit in a header of the size `header_size`, immediately +preceding the memory region, aligned on a boundary specified by `align`. See +the following diagram for a visual representation. + + header size + |<------>| + +---+--------+------------- - - - + | HEADER | DATA... + +---+--------+------------- - - - + ^ ^ + |<---------->| + | padding | + ptr aligned ptr + +The function takes in `ptr` and `header_size`, as well as the required +alignment for `DATA`. The return value of the function is the padding between +`ptr` and `aligned_ptr` that will be able to fit the header. */ @(require_results) calc_padding_with_header :: proc "contextless" (ptr: uintptr, align: uintptr, header_size: int) -> int { From 016d1a84d4e09ea06491d9e5a4661e313906e3aa Mon Sep 17 00:00:00 2001 From: flysand7 Date: Sat, 14 Sep 2024 10:46:35 +1100 Subject: [PATCH 34/35] [mem]: Document mutex, rollback stack and tracking allocators --- core/mem/mutex_allocator.odin | 15 +++ core/mem/raw.odin | 46 ++++++--- core/mem/rollback_stack_allocator.odin | 135 ++++++++++++++----------- core/mem/tracking_allocator.odin | 114 ++++++++++++++------- 4 files changed, 197 insertions(+), 113 deletions(-) diff --git a/core/mem/mutex_allocator.odin b/core/mem/mutex_allocator.odin index 1cccc7dacab..d2c527fdb39 100644 --- a/core/mem/mutex_allocator.odin +++ b/core/mem/mutex_allocator.odin @@ -3,16 +3,31 @@ package mem import "core:sync" +/* +The data for mutex allocator. +*/ Mutex_Allocator :: struct { backing: Allocator, mutex: sync.Mutex, } +/* +Initialize the mutex allocator. 
+
+This procedure initializes the mutex allocator using `backing_allocator` as the
+allocator that will be used to pass all allocation requests through.
+*/
 mutex_allocator_init :: proc(m: ^Mutex_Allocator, backing_allocator: Allocator) {
 	m.backing = backing_allocator
 	m.mutex = {}
 }
 
+/*
+Mutex allocator.
+
+The mutex allocator is a wrapper for allocators that is used to serialize all
+allocator requests across multiple threads.
+*/
 @(require_results)
 mutex_allocator :: proc(m: ^Mutex_Allocator) -> Allocator {
 	return Allocator{
diff --git a/core/mem/raw.odin b/core/mem/raw.odin
index ab1148cea25..41c91555ef1 100644
--- a/core/mem/raw.odin
+++ b/core/mem/raw.odin
@@ -4,68 +4,82 @@ import "base:builtin"
 import "base:runtime"
 
 /*
-Mamory layout of the `any` type.
+Memory layout of the `any` type.
 */
 Raw_Any :: runtime.Raw_Any
 
 /*
-Mamory layout of the `string` type.
+Memory layout of the `string` type.
 */
 Raw_String :: runtime.Raw_String
+
 /*
-Mamory layout of the `cstring` type.
+Memory layout of the `cstring` type.
 */
 Raw_Cstring :: runtime.Raw_Cstring
+
 /*
-Mamory layout of `[]T` types.
+Memory layout of `[]T` types.
 */
 Raw_Slice :: runtime.Raw_Slice
+
 /*
-Mamory layout of `[dynamic]T` types.
+Memory layout of `[dynamic]T` types.
 */
 Raw_Dynamic_Array :: runtime.Raw_Dynamic_Array
+
 /*
-Mamory layout of `map[K]V` types.
+Memory layout of `map[K]V` types.
 */
 Raw_Map :: runtime.Raw_Map
+
 /*
-Mamory layout of `#soa []T` types.
+Memory layout of `#soa []T` types.
 */
 Raw_Soa_Pointer :: runtime.Raw_Soa_Pointer
+
 /*
-Mamory layout of the `complex32` type.
+Memory layout of the `complex32` type.
 */
 Raw_Complex32 :: runtime.Raw_Complex32
+
 /*
-Mamory layout of the `complex64` type.
+Memory layout of the `complex64` type.
 */
 Raw_Complex64 :: runtime.Raw_Complex64
+
 /*
-Mamory layout of the `complex128` type.
+Memory layout of the `complex128` type.
 */
 Raw_Complex128 :: runtime.Raw_Complex128
+
 /*
-Mamory layout of the `quaternion64` type.
+Memory layout of the `quaternion64` type.
*/ Raw_Quaternion64 :: runtime.Raw_Quaternion64 + /* -Mamory layout of the `quaternion128` type. +Memory layout of the `quaternion128` type. */ Raw_Quaternion128 :: runtime.Raw_Quaternion128 + /* -Mamory layout of the `quaternion256` type. +Memory layout of the `quaternion256` type. */ Raw_Quaternion256 :: runtime.Raw_Quaternion256 + /* -Mamory layout of the `quaternion64` type. +Memory layout of the `quaternion64` type. */ Raw_Quaternion64_Vector_Scalar :: runtime.Raw_Quaternion64_Vector_Scalar + /* -Mamory layout of the `quaternion128` type. +Memory layout of the `quaternion128` type. */ Raw_Quaternion128_Vector_Scalar :: runtime.Raw_Quaternion128_Vector_Scalar + /* -Mamory layout of the `quaternion256` type. +Memory layout of the `quaternion256` type. */ Raw_Quaternion256_Vector_Scalar :: runtime.Raw_Quaternion256_Vector_Scalar diff --git a/core/mem/rollback_stack_allocator.odin b/core/mem/rollback_stack_allocator.odin index 76143555221..61ec7354612 100644 --- a/core/mem/rollback_stack_allocator.odin +++ b/core/mem/rollback_stack_allocator.odin @@ -1,39 +1,15 @@ package mem -/* -The Rollback Stack Allocator was designed for the test runner to be fast, -able to grow, and respect the Tracking Allocator's requirement for -individual frees. It is not overly concerned with fragmentation, however. - -It has support for expansion when configured with a block allocator and -limited support for out-of-order frees. - -Allocation has constant-time best and usual case performance. -At worst, it is linear according to the number of memory blocks. - -Allocation follows a first-fit strategy when there are multiple memory -blocks. - -Freeing has constant-time best and usual case performance. -At worst, it is linear according to the number of memory blocks and number -of freed items preceding the last item in a block. - -Resizing has constant-time performance, if it's the last item in a block, or -the new size is smaller. 
Naturally, this becomes linear-time if there are -multiple blocks to search for the pointer's owning block. Otherwise, the -allocator defaults to a combined alloc & free operation internally. - -Out-of-order freeing is accomplished by collapsing a run of freed items -from the last allocation backwards. - -Each allocation has an overhead of 8 bytes and any extra bytes to satisfy -the requested alignment. -*/ import "base:runtime" +/* +Rollback stack default block size. +*/ ROLLBACK_STACK_DEFAULT_BLOCK_SIZE :: 4 * Megabyte /* +Rollback stack max head block size. + This limitation is due to the size of `prev_ptr`, but it is only for the head block; any allocation in excess of the allocator's `block_size` is valid, so long as the block allocator can handle it. @@ -43,12 +19,18 @@ within is freed; they are immediately returned to the block allocator. */ ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE :: 2 * Gigabyte +/* +Allocation header of the rollback stack allocator. +*/ Rollback_Stack_Header :: bit_field u64 { prev_offset: uintptr | 32, is_free: bool | 1, prev_ptr: uintptr | 31, } +/* +Block header of the rollback stack allocator. +*/ Rollback_Stack_Block :: struct { next_block: ^Rollback_Stack_Block, last_alloc: rawptr, @@ -56,6 +38,9 @@ Rollback_Stack_Block :: struct { buffer: []byte, } +/* +Rollback stack allocator data. +*/ Rollback_Stack :: struct { head: ^Rollback_Stack_Block, block_size: int, @@ -111,6 +96,9 @@ rb_rollback_block :: proc(block: ^Rollback_Stack_Block, header: ^Rollback_Stack_ } } +/* +Free memory to a rollback stack allocator. +*/ @(private="file", require_results) rb_free :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> Allocator_Error { parent, block, header := rb_find_ptr(stack, ptr) or_return @@ -129,6 +117,9 @@ rb_free :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> Allocator_Error { return nil } +/* +Free all memory owned by the rollback stack allocator. 
+*/ @(private="file") rb_free_all :: proc(stack: ^Rollback_Stack) { for block := stack.head.next_block; block != nil; /**/ { @@ -142,14 +133,16 @@ rb_free_all :: proc(stack: ^Rollback_Stack) { stack.head.offset = 0 } +/* +Resize an allocation made on a rollback stack allocator. +*/ @(private="file", require_results) -rb_resize :: proc(stack: ^Rollback_Stack, ptr: rawptr, old_size, size, alignment: int) -> (result: []byte, err: Allocator_Error) { +rb_resize_non_zeroed :: proc(stack: ^Rollback_Stack, ptr: rawptr, old_size, size, alignment: int) -> (result: []byte, err: Allocator_Error) { if ptr != nil { if block, _, ok := rb_find_last_alloc(stack, ptr); ok { // `block.offset` should never underflow because it is contingent // on `old_size` in the first place, assuming sane arguments. assert(block.offset >= cast(uintptr)old_size, "Rollback Stack Allocator received invalid `old_size`.") - if block.offset + cast(uintptr)size - cast(uintptr)old_size < cast(uintptr)len(block.buffer) { // Prevent singleton allocations from fragmenting by forbidding // them to shrink, removing the possibility of overflow bugs. @@ -160,27 +153,26 @@ rb_resize :: proc(stack: ^Rollback_Stack, ptr: rawptr, old_size, size, alignment } } } - - result = rb_alloc(stack, size, alignment) or_return + result = rb_alloc_non_zeroed(stack, size, alignment) or_return runtime.mem_copy_non_overlapping(raw_data(result), ptr, old_size) err = rb_free(stack, ptr) - return } +/* +Allocate memory using the rollback stack allocator. 
+*/ @(private="file", require_results) -rb_alloc :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (result: []byte, err: Allocator_Error) { +rb_alloc_non_zeroed :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (result: []byte, err: Allocator_Error) { parent: ^Rollback_Stack_Block for block := stack.head; /**/; block = block.next_block { when !ODIN_DISABLE_ASSERT { allocated_new_block: bool } - if block == nil { if stack.block_allocator.procedure == nil { return nil, .Out_Of_Memory } - minimum_size_required := size_of(Rollback_Stack_Header) + size + alignment - 1 new_block_size := max(minimum_size_required, stack.block_size) block = rb_make_block(new_block_size, stack.block_allocator) or_return @@ -189,10 +181,8 @@ rb_alloc :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (result: []byt allocated_new_block = true } } - start := raw_data(block.buffer)[block.offset:] padding := cast(uintptr)calc_padding_with_header(cast(uintptr)start, cast(uintptr)alignment, size_of(Rollback_Stack_Header)) - if block.offset + padding + cast(uintptr)size > cast(uintptr)len(block.buffer) { when !ODIN_DISABLE_ASSERT { if allocated_new_block { @@ -202,54 +192,50 @@ rb_alloc :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (result: []byt parent = block continue } - header := cast(^Rollback_Stack_Header)(start[padding - size_of(Rollback_Stack_Header):]) ptr := start[padding:] - header^ = { prev_offset = block.offset, prev_ptr = uintptr(0) if block.last_alloc == nil else cast(uintptr)block.last_alloc - cast(uintptr)raw_data(block.buffer), is_free = false, } - block.last_alloc = ptr block.offset += padding + cast(uintptr)size - if len(block.buffer) > stack.block_size { // This block exceeds the allocator's standard block size and is considered a singleton. // Prevent any further allocations on it. 
 			block.offset = cast(uintptr)len(block.buffer)
 		}
-
 		#no_bounds_check return ptr[:size], nil
 	}
-
 	return nil, .Out_Of_Memory
 }
 
 @(private="file", require_results)
 rb_make_block :: proc(size: int, allocator: Allocator) -> (block: ^Rollback_Stack_Block, err: Allocator_Error) {
 	buffer := runtime.mem_alloc(size_of(Rollback_Stack_Block) + size, align_of(Rollback_Stack_Block), allocator) or_return
-
 	block = cast(^Rollback_Stack_Block)raw_data(buffer)
 	#no_bounds_check block.buffer = buffer[size_of(Rollback_Stack_Block):]
 	return
 }
 
-
+/*
+Initialize the rollback stack allocator using a fixed backing buffer.
+*/
 rollback_stack_init_buffered :: proc(stack: ^Rollback_Stack, buffer: []byte, location := #caller_location) {
 	MIN_SIZE :: size_of(Rollback_Stack_Block) + size_of(Rollback_Stack_Header) + size_of(rawptr)
 	assert(len(buffer) >= MIN_SIZE, "User-provided buffer to Rollback Stack Allocator is too small.", location)
-
 	block := cast(^Rollback_Stack_Block)raw_data(buffer)
 	block^ = {}
 	#no_bounds_check block.buffer = buffer[size_of(Rollback_Stack_Block):]
-
 	stack^ = {}
 	stack.head = block
 	stack.block_size = len(block.buffer)
 }
 
+/*
+Initialize the rollback stack allocator using a backing block allocator.
+*/
 rollback_stack_init_dynamic :: proc(
 	stack: ^Rollback_Stack,
 	block_size : int = ROLLBACK_STACK_DEFAULT_BLOCK_SIZE,
@@ -262,22 +248,25 @@ rollback_stack_init_dynamic :: proc(
 	// size is insufficient; check only on platforms with big enough ints.
 		assert(block_size <= ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE, "Rollback Stack Allocators cannot support head blocks larger than 2 gigabytes.", location)
 	}
-
 	block := rb_make_block(block_size, block_allocator) or_return
-
 	stack^ = {}
 	stack.head = block
 	stack.block_size = block_size
 	stack.block_allocator = block_allocator
-
 	return nil
 }
 
+/*
+Initialize the rollback stack.
+*/
 rollback_stack_init :: proc {
 	rollback_stack_init_buffered,
 	rollback_stack_init_dynamic,
 }
 
+/*
+Destroy a rollback stack.
+*/ rollback_stack_destroy :: proc(stack: ^Rollback_Stack) { if stack.block_allocator.procedure != nil { rb_free_all(stack) @@ -286,6 +275,37 @@ rollback_stack_destroy :: proc(stack: ^Rollback_Stack) { stack^ = {} } +/* +Rollback stack allocator. + +The Rollback Stack Allocator was designed for the test runner to be fast, +able to grow, and respect the Tracking Allocator's requirement for +individual frees. It is not overly concerned with fragmentation, however. + +It has support for expansion when configured with a block allocator and +limited support for out-of-order frees. + +Allocation has constant-time best and usual case performance. +At worst, it is linear according to the number of memory blocks. + +Allocation follows a first-fit strategy when there are multiple memory +blocks. + +Freeing has constant-time best and usual case performance. +At worst, it is linear according to the number of memory blocks and number +of freed items preceding the last item in a block. + +Resizing has constant-time performance, if it's the last item in a block, or +the new size is smaller. Naturally, this becomes linear-time if there are +multiple blocks to search for the pointer's owning block. Otherwise, the +allocator defaults to a combined alloc & free operation internally. + +Out-of-order freeing is accomplished by collapsing a run of freed items +from the last allocation backwards. + +Each allocation has an overhead of 8 bytes and any extra bytes to satisfy +the requested alignment. 
+*/ @(require_results) rollback_stack_allocator :: proc(stack: ^Rollback_Stack) -> Allocator { return Allocator { @@ -309,38 +329,31 @@ rollback_stack_allocator_proc :: proc( case .Alloc, .Alloc_Non_Zeroed: assert(size >= 0, "Size must be positive or zero.", location) assert(is_power_of_two(cast(uintptr)alignment), "Alignment must be a power of two.", location) - result = rb_alloc(stack, size, alignment) or_return - + result = rb_alloc_non_zeroed(stack, size, alignment) or_return if mode == .Alloc { zero_slice(result) } - case .Free: err = rb_free(stack, old_memory) case .Free_All: rb_free_all(stack) - case .Resize, .Resize_Non_Zeroed: assert(size >= 0, "Size must be positive or zero.", location) assert(old_size >= 0, "Old size must be positive or zero.", location) assert(is_power_of_two(cast(uintptr)alignment), "Alignment must be a power of two.", location) - result = rb_resize(stack, old_memory, old_size, size, alignment) or_return - + result = rb_resize_non_zeroed(stack, old_memory, old_size, size, alignment) or_return #no_bounds_check if mode == .Resize && size > old_size { zero_slice(result[old_size:]) } - case .Query_Features: set := (^Allocator_Mode_Set)(old_memory) if set != nil { set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed} } return nil, nil - case .Query_Info: return nil, .Mode_Not_Implemented } - return } diff --git a/core/mem/tracking_allocator.odin b/core/mem/tracking_allocator.odin index e75844130da..e436fcb6dc2 100644 --- a/core/mem/tracking_allocator.odin +++ b/core/mem/tracking_allocator.odin @@ -4,50 +4,38 @@ package mem import "base:runtime" import "core:sync" +/* +Allocation entry for the tracking allocator. + +This structure stores the data related to an allocation. +*/ Tracking_Allocator_Entry :: struct { - memory: rawptr, - size: int, + // Pointer to an allocated region. + memory: rawptr, + // Size of the allocated memory region. + size: int, + // Requested alignment. 
 	alignment: int,
-	mode: Allocator_Mode,
-	err: Allocator_Error,
+	// Mode of the operation.
+	mode: Allocator_Mode,
+	// Error.
+	err: Allocator_Error,
+	// Location of the allocation.
 	location: runtime.Source_Code_Location,
 }
 
+/*
+Bad free entry for a tracking allocator.
+*/
 Tracking_Allocator_Bad_Free_Entry :: struct {
-	memory: rawptr,
+	// Pointer, on which free operation was called.
+	memory: rawptr,
+	// The source location of where the operation was called.
 	location: runtime.Source_Code_Location,
 }
 
 /*
-An example of how to use the `Tracking_Allocator` to track subsequent allocations
-in your program and report leaks and bad frees:
-
-Example:
-
-	package foo
-
-	import "core:mem"
-	import "core:fmt"
-
-	_main :: proc() {
-		// do stuff
-	}
-
-	main :: proc() {
-		track: mem.Tracking_Allocator
-		mem.tracking_allocator_init(&track, context.allocator)
-		defer mem.tracking_allocator_destroy(&track)
-		context.allocator = mem.tracking_allocator(&track)
-
-		_main()
-
-		for _, leak in track.allocation_map {
-			fmt.printf("%v leaked %m\n", leak.location, leak.size)
-		}
-		for bad_free in track.bad_free_array {
-			fmt.printf("%v allocation %p was freed badly\n", bad_free.location, bad_free.memory)
-		}
-	}
+Tracking allocator data.
 */
 Tracking_Allocator :: struct {
 	backing: Allocator,
@@ -63,6 +51,13 @@ Tracking_Allocator :: struct {
 	current_memory_allocated: i64,
 }
 
+/*
+Initialize the tracking allocator.
+
+This procedure initializes the tracking allocator `t` with a backing allocator
+specified with `backing_allocator`. The `internals_allocator` will be used to
+allocate the tracked data.
+*/
 tracking_allocator_init :: proc(t: ^Tracking_Allocator, backing_allocator: Allocator, internals_allocator := context.allocator) {
 	t.backing = backing_allocator
 	t.allocation_map.allocator = internals_allocator
@@ -72,12 +67,22 @@ tracking_allocator_init :: proc(t: ^Tracking_Allocator, backing_allocator: Alloc
 	}
 }
 
+/*
+Destroy the tracking allocator.
+*/ tracking_allocator_destroy :: proc(t: ^Tracking_Allocator) { delete(t.allocation_map) delete(t.bad_free_array) } -// Clear only the current allocation data while keeping the totals intact. +/* +Clear the tracking allocator. + +This procedure clears the tracked data from a tracking allocator. + +**Note**: This procedure clears only the current allocation data while keeping +the totals intact. +*/ tracking_allocator_clear :: proc(t: ^Tracking_Allocator) { sync.mutex_lock(&t.mutex) clear(&t.allocation_map) @@ -86,7 +91,11 @@ tracking_allocator_clear :: proc(t: ^Tracking_Allocator) { sync.mutex_unlock(&t.mutex) } -// Reset all of a Tracking Allocator's allocation data back to zero. +/* +Reset the tracking allocator. + +Reset all of a Tracking Allocator's allocation data back to zero. +*/ tracking_allocator_reset :: proc(t: ^Tracking_Allocator) { sync.mutex_lock(&t.mutex) clear(&t.allocation_map) @@ -100,6 +109,39 @@ tracking_allocator_reset :: proc(t: ^Tracking_Allocator) { sync.mutex_unlock(&t.mutex) } +/* +Tracking allocator. + +The tracking allocator is an allocator wrapper that tracks memory allocations. +This allocator stores all the allocations in a map. Whenever a pointer that's +not inside of the map is freed, the `bad_free_array` entry is added. 
+ +An example of how to use the `Tracking_Allocator` to track subsequent allocations +in your program and report leaks and bad frees: + +Example: + + package foo + + import "core:mem" + import "core:fmt" + + main :: proc() { + track: mem.Tracking_Allocator + mem.tracking_allocator_init(&track, context.allocator) + defer mem.tracking_allocator_destroy(&track) + context.allocator = mem.tracking_allocator(&track) + + do_stuff() + + for _, leak in track.allocation_map { + fmt.printf("%v leaked %m\n", leak.location, leak.size) + } + for bad_free in track.bad_free_array { + fmt.printf("%v allocation %p was freed badly\n", bad_free.location, bad_free.memory) + } + } +*/ @(require_results) tracking_allocator :: proc(data: ^Tracking_Allocator) -> Allocator { return Allocator{ From 466e29bb38040ce8737e125a8a13c97cd4e74252 Mon Sep 17 00:00:00 2001 From: flysand7 Date: Sat, 14 Sep 2024 12:13:56 +1100 Subject: [PATCH 35/35] [mem]: Rollback allocator API consistency --- core/mem/rollback_stack_allocator.odin | 205 ++++++++++++++++++++----- 1 file changed, 163 insertions(+), 42 deletions(-) diff --git a/core/mem/rollback_stack_allocator.odin b/core/mem/rollback_stack_allocator.odin index 61ec7354612..43ef10fe94e 100644 --- a/core/mem/rollback_stack_allocator.odin +++ b/core/mem/rollback_stack_allocator.odin @@ -134,36 +134,65 @@ rb_free_all :: proc(stack: ^Rollback_Stack) { } /* -Resize an allocation made on a rollback stack allocator. +Allocate memory using the rollback stack allocator. */ -@(private="file", require_results) -rb_resize_non_zeroed :: proc(stack: ^Rollback_Stack, ptr: rawptr, old_size, size, alignment: int) -> (result: []byte, err: Allocator_Error) { - if ptr != nil { - if block, _, ok := rb_find_last_alloc(stack, ptr); ok { - // `block.offset` should never underflow because it is contingent - // on `old_size` in the first place, assuming sane arguments. 
- assert(block.offset >= cast(uintptr)old_size, "Rollback Stack Allocator received invalid `old_size`.") - if block.offset + cast(uintptr)size - cast(uintptr)old_size < cast(uintptr)len(block.buffer) { - // Prevent singleton allocations from fragmenting by forbidding - // them to shrink, removing the possibility of overflow bugs. - if len(block.buffer) <= stack.block_size { - block.offset += cast(uintptr)size - cast(uintptr)old_size - } - #no_bounds_check return (cast([^]byte)ptr)[:size], nil - } - } +@(require_results) +rb_alloc :: proc( + stack: ^Rollback_Stack, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location, +) -> (rawptr, Allocator_Error) { + bytes, err := rb_alloc_bytes_non_zeroed(stack, size, alignment, loc) + if bytes != nil { + zero_slice(bytes) } - result = rb_alloc_non_zeroed(stack, size, alignment) or_return - runtime.mem_copy_non_overlapping(raw_data(result), ptr, old_size) - err = rb_free(stack, ptr) - return + return raw_data(bytes), err } /* Allocate memory using the rollback stack allocator. */ -@(private="file", require_results) -rb_alloc_non_zeroed :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (result: []byte, err: Allocator_Error) { +@(require_results) +rb_alloc_bytes :: proc( + stack: ^Rollback_Stack, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { + bytes, err := rb_alloc_bytes_non_zeroed(stack, size, alignment, loc) + if bytes != nil { + zero_slice(bytes) + } + return bytes, err +} + +/* +Allocate non-initialized memory using the rollback stack allocator. +*/ +@(require_results) +rb_alloc_non_zeroed :: proc( + stack: ^Rollback_Stack, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location, +) -> (rawptr, Allocator_Error) { + bytes, err := rb_alloc_bytes_non_zeroed(stack, size, alignment, loc) + return raw_data(bytes), err +} + +/* +Allocate non-initialized memory using the rollback stack allocator. 
+*/ +@(require_results) +rb_alloc_bytes_non_zeroed :: proc( + stack: ^Rollback_Stack, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location, +) -> (result: []byte, err: Allocator_Error) { + assert(size >= 0, "Size must be positive or zero.", loc) + assert(is_power_of_two(cast(uintptr)alignment), "Alignment must be a power of two.", loc) parent: ^Rollback_Stack_Block for block := stack.head; /**/; block = block.next_block { when !ODIN_DISABLE_ASSERT { @@ -211,6 +240,106 @@ rb_alloc_non_zeroed :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (re return nil, .Out_Of_Memory } +/* +Resize an allocation owned by rollback stack allocator. +*/ +@(require_results) +rb_resize :: proc( + stack: ^Rollback_Stack, + old_ptr: rawptr, + old_size: int, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location, +) -> (rawptr, Allocator_Error) { + bytes, err := rb_resize_bytes_non_zeroed(stack, byte_slice(old_ptr, old_size), size, alignment, loc) + if bytes != nil { + if old_ptr == nil { + zero_slice(bytes) + } else if size > old_size { + zero_slice(bytes[old_size:]) + } + } + return raw_data(bytes), err +} + +/* +Resize an allocation owned by rollback stack allocator. +*/ +@(require_results) +rb_resize_bytes :: proc( + stack: ^Rollback_Stack, + old_memory: []byte, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location, +) -> ([]u8, Allocator_Error) { + bytes, err := rb_resize_bytes_non_zeroed(stack, old_memory, size, alignment, loc) + if bytes != nil { + if old_memory == nil { + zero_slice(bytes) + } else if size > len(old_memory) { + zero_slice(bytes[len(old_memory):]) + } + } + return bytes, err +} + +/* +Resize an allocation owned by rollback stack allocator without explicit +zero-initialization. 
+*/ +@(require_results) +rb_resize_non_zeroed :: proc( + stack: ^Rollback_Stack, + old_ptr: rawptr, + old_size: int, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location, +) -> (rawptr, Allocator_Error) { + bytes, err := rb_resize_bytes_non_zeroed(stack, byte_slice(old_ptr, old_size), size, alignment, loc) + return raw_data(bytes), err +} + +/* +Resize an allocation owned by rollback stack allocator without explicit +zero-initialization. +*/ +@(require_results) +rb_resize_bytes_non_zeroed :: proc( + stack: ^Rollback_Stack, + old_memory: []byte, + size: int, + alignment := DEFAULT_ALIGNMENT, + loc := #caller_location, +) -> (result: []byte, err: Allocator_Error) { + old_size := len(old_memory) + ptr := raw_data(old_memory) + assert(size >= 0, "Size must be positive or zero.", loc) + assert(old_size >= 0, "Old size must be positive or zero.", loc) + assert(is_power_of_two(cast(uintptr)alignment), "Alignment must be a power of two.", loc) + if ptr != nil { + if block, _, ok := rb_find_last_alloc(stack, ptr); ok { + // `block.offset` should never underflow because it is contingent + // on `old_size` in the first place, assuming sane arguments. + assert(block.offset >= cast(uintptr)old_size, "Rollback Stack Allocator received invalid `old_size`.") + if block.offset + cast(uintptr)size - cast(uintptr)old_size < cast(uintptr)len(block.buffer) { + // Prevent singleton allocations from fragmenting by forbidding + // them to shrink, removing the possibility of overflow bugs. 
+ if len(block.buffer) <= stack.block_size { + block.offset += cast(uintptr)size - cast(uintptr)old_size + } + #no_bounds_check return (ptr)[:size], nil + } + } + } + result = rb_alloc_bytes_non_zeroed(stack, size, alignment) or_return + runtime.mem_copy_non_overlapping(raw_data(result), ptr, old_size) + err = rb_free(stack, ptr) + return +} + @(private="file", require_results) rb_make_block :: proc(size: int, allocator: Allocator) -> (block: ^Rollback_Stack_Block, err: Allocator_Error) { buffer := runtime.mem_alloc(size_of(Rollback_Stack_Block) + size, align_of(Rollback_Stack_Block), allocator) or_return @@ -321,31 +450,23 @@ rollback_stack_allocator_proc :: proc( size, alignment: int, old_memory: rawptr, old_size: int, - location := #caller_location, + loc := #caller_location, ) -> (result: []byte, err: Allocator_Error) { stack := cast(^Rollback_Stack)allocator_data - switch mode { - case .Alloc, .Alloc_Non_Zeroed: - assert(size >= 0, "Size must be positive or zero.", location) - assert(is_power_of_two(cast(uintptr)alignment), "Alignment must be a power of two.", location) - result = rb_alloc_non_zeroed(stack, size, alignment) or_return - if mode == .Alloc { - zero_slice(result) - } + case .Alloc: + return rb_alloc_bytes(stack, size, alignment, loc) + case .Alloc_Non_Zeroed: + return rb_alloc_bytes_non_zeroed(stack, size, alignment, loc) case .Free: - err = rb_free(stack, old_memory) - + return nil, rb_free(stack, old_memory) case .Free_All: rb_free_all(stack) - case .Resize, .Resize_Non_Zeroed: - assert(size >= 0, "Size must be positive or zero.", location) - assert(old_size >= 0, "Old size must be positive or zero.", location) - assert(is_power_of_two(cast(uintptr)alignment), "Alignment must be a power of two.", location) - result = rb_resize_non_zeroed(stack, old_memory, old_size, size, alignment) or_return - #no_bounds_check if mode == .Resize && size > old_size { - zero_slice(result[old_size:]) - } + return nil, nil + case .Resize: + return 
rb_resize_bytes(stack, byte_slice(old_memory, old_size), size, alignment, loc) + case .Resize_Non_Zeroed: + return rb_resize_bytes_non_zeroed(stack, byte_slice(old_memory, old_size), size, alignment, loc) case .Query_Features: set := (^Allocator_Mode_Set)(old_memory) if set != nil {