
Allocate pools for the shared heap in batches rather than one at a time #2248

Merged: 3 commits, Jan 31, 2024
53 changes: 21 additions & 32 deletions ocaml/runtime/platform.c
@@ -22,20 +22,37 @@
#include "caml/osdeps.h"
#include "caml/platform.h"
#include "caml/fail.h"
#include "caml/lf_skiplist.h"
#ifdef HAS_SYS_MMAN_H
#include <sys/mman.h>
#endif
#ifdef _WIN32
#include <windows.h>
#endif
#ifdef DEBUG
#include "caml/domain.h"
#endif

#include "caml/alloc.h"
#include "sync_posix.h"

#ifdef _WIN32
/* CR ocaml 5 compactor:

The runtime does not currently guarantee that memory is released to the OS in
the same block sizes as it was allocated, making it incompatible with
Windows.

This incompatibility arises from the batch-mmap patch at:

https://github.com/ocaml-flambda/flambda-backend/pull/2248

which does large memory allocations to acquire new pools. However, the
compactor releases pools one at a time. Until the compactor is updated
to be aware of large mappings, this will not work on Windows.

So, for now, Windows compatibility is broken. The assertions ensuring that
mapping and unmapping sizes agree (ocaml/ocaml PR#10908) have been reverted,
and should be restored once the compactor is updated */
#error "Windows compatibility currently broken due to mmap sizing"
#endif

/* Error reporting */

void caml_plat_fatal_error(const char * action, int err)
@@ -144,24 +161,8 @@ uintnat caml_mem_round_up_pages(uintnat size)

#define Is_page_aligned(size) ((size & (caml_plat_pagesize - 1)) == 0)

#ifdef DEBUG
static struct lf_skiplist mmap_blocks = {NULL};
#endif

#ifndef _WIN32
#endif

void* caml_mem_map(uintnat size, int reserve_only)
{
#ifdef DEBUG
if (mmap_blocks.head == NULL) {
/* The first call to caml_mem_map should be during caml_init_domains, called
by caml_init_gc during startup - i.e. before any domains have started. */
CAMLassert(atomic_load_acquire(&caml_num_domains_running) <= 1);
caml_lf_skiplist_init(&mmap_blocks);
}
#endif

void* mem = caml_plat_mem_map(size, reserve_only);

if (mem == 0) {
@@ -173,10 +174,6 @@ void* caml_mem_map(uintnat size, int reserve_only)
caml_gc_message(0x1000, "mmap %" ARCH_INTNAT_PRINTF_FORMAT "d"
" bytes at %p for heaps\n", size, mem);

#ifdef DEBUG
caml_lf_skiplist_insert(&mmap_blocks, (uintnat)mem, size);
#endif

return mem;
}

@@ -199,17 +196,9 @@ void caml_mem_decommit(void* mem, uintnat size)

void caml_mem_unmap(void* mem, uintnat size)
{
#ifdef DEBUG
uintnat data;
CAMLassert(caml_lf_skiplist_find(&mmap_blocks, (uintnat)mem, &data) != 0);
CAMLassert(data == size);
#endif
caml_gc_message(0x1000, "munmap %" ARCH_INTNAT_PRINTF_FORMAT "d"
" bytes at %p for heaps\n", size, mem);
caml_plat_mem_unmap(mem, size);
#ifdef DEBUG
caml_lf_skiplist_remove(&mmap_blocks, (uintnat)mem);
#endif
}

#define Min_sleep_ns 10000 // 10 us
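
The CR comment in this file is the heart of the platform.c change: POSIX munmap can release any page-aligned subrange of a mapping, while Windows VirtualFree with MEM_RELEASE accepts only the exact base address of the original reservation, with a size of 0. A minimal sketch of the mismatch follows; it is illustrative only, and the pool size and the program itself are hypothetical, not part of the patch:

#include <windows.h>

int main(void)
{
  SIZE_T pool = 1 << 20; /* hypothetical pool size, for illustration */

  /* One batch reservation covering 8 pools, as the batch-mmap
     strategy does. */
  char *base = VirtualAlloc(NULL, 8 * pool, MEM_RESERVE, PAGE_NOACCESS);
  if (base == NULL) return 1;

  /* Releasing a single pool from the middle fails: MEM_RELEASE
     requires the base address returned by VirtualAlloc and a size of
     0, i.e. the whole reservation at once. */
  BOOL ok = VirtualFree(base + 3 * pool, 0, MEM_RELEASE);

  /* On POSIX, munmap(base + 3 * pool, pool) would succeed, since
     munmap may unmap any page-aligned subrange of a mapping. */
  return ok ? 0 : 2; /* ok is FALSE here */
}
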
55 changes: 42 additions & 13 deletions ocaml/runtime/shared_heap.c
@@ -70,6 +70,16 @@ static struct {
caml_plat_mutex lock;
pool* free;

/* Mapped but not yet active pools */
uintnat fresh_pools;
char* next_fresh_pool;

/* Count of all pools in use across all domains and the global lists below.

Does not include unused pools ('free' above) or freshly allocated pools
('next_fresh_pool' above). */
uintnat active_pools;

/* these only contain swept memory of terminated domains */
struct heap_stats stats;
pool* global_avail_pools[NUM_SIZECLASSES];
@@ -78,6 +88,9 @@
} pool_freelist = {
CAML_PLAT_MUTEX_INITIALIZER,
NULL,
0,
NULL,
0,
{ 0, },
{ 0, },
{ 0, },
@@ -183,24 +196,34 @@ static pool* pool_acquire(struct caml_heap_state* local) {
pool* r;

caml_plat_lock(&pool_freelist.lock);
if (!pool_freelist.free) {
void* mem = caml_mem_map(Bsize_wsize(POOL_WSIZE), 0);

if (mem) {
CAMLassert(pool_freelist.free == NULL);

r = (pool*)mem;
r->next = pool_freelist.free;
r = pool_freelist.free;
if (r) {
pool_freelist.free = r->next;
} else {
if (pool_freelist.fresh_pools == 0) {
uintnat new_pools = pool_freelist.active_pools * 15 / 100;
if (new_pools < 8) new_pools = 8;

void* mem = caml_mem_map(Bsize_wsize(POOL_WSIZE) * new_pools, 0);
if (mem) {
pool_freelist.fresh_pools = new_pools;
pool_freelist.next_fresh_pool = mem;
}
}
if (pool_freelist.fresh_pools > 0) {
r = (pool*)pool_freelist.next_fresh_pool;
pool_freelist.next_fresh_pool += Bsize_wsize(POOL_WSIZE);
pool_freelist.fresh_pools --;
r->next = NULL;
r->owner = NULL;
pool_freelist.free = r;
}
}
r = pool_freelist.free;
if (r)
pool_freelist.free = r->next;
if (r) {
pool_freelist.active_pools ++;
CAMLassert(r->owner == NULL);
}
caml_plat_unlock(&pool_freelist.lock);

if (r) CAMLassert (r->owner == NULL);
return r;
}
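
The rewritten pool_acquire above is the core of the PR: when both the free list and the current run of fresh pools are exhausted, it maps a single batch sized at 15% of the active pool count, with a floor of 8 pools, so past the floor the batch sizes grow geometrically and the number of mmap calls stays roughly logarithmic in the heap size. Below is a self-contained sketch of just that sizing policy; batch_size and the driver loop are illustrative names, not code from the patch:

#include <stdio.h>
#include <stdint.h>

typedef uintptr_t uintnat; /* as in the OCaml runtime */

/* Batch sizing from pool_acquire: 15% of active pools, floored at 8. */
static uintnat batch_size(uintnat active_pools)
{
  uintnat new_pools = active_pools * 15 / 100;
  if (new_pools < 8) new_pools = 8;
  return new_pools;
}

int main(void)
{
  /* Assume every mapped pool becomes active before the next batch is
     needed, and print how the batch sizes grow. */
  uintnat active = 0;
  while (active < 300) {
    uintnat b = batch_size(active);
    printf("active=%3lu -> map %lu pools in one call\n",
           (unsigned long)active, (unsigned long)b);
    active += b;
  }
  return 0;
}
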

@@ -216,6 +239,7 @@ static void pool_release(struct caml_heap_state* local,
caml_plat_lock(&pool_freelist.lock);
pool->next = pool_freelist.free;
pool_freelist.free = pool;
pool_freelist.active_pools--;
caml_plat_unlock(&pool_freelist.lock);
}

@@ -1244,6 +1268,7 @@ void caml_compact_heap(caml_domain_state* domain_state,
remaining pools have been filled up by evacuated blocks. */

pool* cur_pool = evacuated_pools;
uintnat freed_pools = 0;
while (cur_pool) {
pool* next_pool = cur_pool->next;

@@ -1256,7 +1281,11 @@

pool_free(heap, cur_pool, cur_pool->sz);
cur_pool = next_pool;
freed_pools++;
}
caml_plat_lock(&pool_freelist.lock);
pool_freelist.active_pools -= freed_pools;
caml_plat_unlock(&pool_freelist.lock);

CAML_EV_END(EV_COMPACT_RELEASE);
caml_global_barrier();
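
The caml_compact_heap hunk keeps the new active_pools counter consistent without adding lock traffic to the sweep: pools freed during evacuation are counted locally and the freelist lock is taken once for the whole batch. A stand-alone sketch of that pattern follows; the types and lock helpers are stand-ins, and it counts in a separate pass where the patch counts inline during the sweep:

#include <stddef.h>
#include <stdint.h>

typedef uintptr_t uintnat;

/* Stand-ins for runtime types and locking, for illustration only. */
typedef struct pool_ { struct pool_ *next; } pool;
static struct { uintnat active_pools; } pool_freelist;
static void lock_freelist(void)   { /* caml_plat_lock in the runtime */ }
static void unlock_freelist(void) { /* caml_plat_unlock */ }

/* Count the pools released during compaction outside the lock, then
   apply one batched decrement, as the hunk above does. */
static void account_freed(pool *evacuated_pools)
{
  uintnat freed_pools = 0;
  for (pool *p = evacuated_pools; p != NULL; p = p->next)
    freed_pools++;
  lock_freelist();
  pool_freelist.active_pools -= freed_pools;
  unlock_freelist();
}

int main(void)
{
  pool a = { NULL }, b = { &a }, c = { &b };
  pool_freelist.active_pools = 10;
  account_freed(&c); /* frees 3 pools: 10 -> 7 */
  return pool_freelist.active_pools == 7 ? 0 : 1;
}
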