2 changes: 1 addition & 1 deletion doc/src/devdocs/locks.md
@@ -16,7 +16,7 @@ The following are definitely leaf locks (level 1), and must not try to acquire a

> * safepoint
>
-> > Note that this lock is acquired implicitly by `JL_LOCK` and `JL_UNLOCK`. use the `_NOGC` variants
+> > Note that this lock is acquired implicitly by `JL_SPIN_LOCK` and `JL_SPIN_UNLOCK`. use the `_NOGC` variants
> > to avoid that for level 1 locks.
> >
> > While holding this lock, the code must not do any allocation or hit any safepoints. Note that
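(Reviewer note, not part of the patch: a minimal sketch of the rule the hunk above states, assuming the renamed macros keep the semantics of the old `JL_LOCK`/`JL_UNLOCK` family. The lock name and both functions below are hypothetical.)

```c
// Hypothetical level-1 (leaf) lock used per the note above: the _NOGC
// variants skip the implicit safepoint lock, so the critical section must
// not allocate or reach a safepoint.
static jl_spin_mutex_t example_leaf_lock;

void example_init(void)
{
    JL_SPIN_MUTEX_INIT(&example_leaf_lock, "example_leaf_lock");
}

void example_leaf_section(void)
{
    JL_SPIN_LOCK_NOGC(&example_leaf_lock);
    // ... touch only pre-allocated, non-GC state here ...
    JL_SPIN_UNLOCK_NOGC(&example_leaf_lock);
}
```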
8 changes: 4 additions & 4 deletions src/aotcompile.cpp
@@ -297,7 +297,7 @@ void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvm

// compile all methods for the current world and type-inference world

-JL_LOCK(&jl_codegen_lock);
+JL_SPIN_LOCK(&jl_codegen_lock);
auto target_info = clone.withModuleDo([&](Module &M) {
return std::make_pair(M.getDataLayout(), Triple(M.getTargetTriple()));
});
@@ -351,7 +351,7 @@ void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvm
// finally, make sure all referenced methods also get compiled or fixed up
jl_compile_workqueue(params, policy);
}
-JL_UNLOCK(&jl_codegen_lock); // Might GC
+JL_SPIN_UNLOCK(&jl_codegen_lock); // Might GC
JL_GC_POP();

// process the globals array, before jl_merge_module destroys them
@@ -2130,7 +2130,7 @@ void jl_get_llvmf_defn_impl(jl_llvmf_dump_t* dump, jl_method_instance_t *mi, siz
uint8_t measure_compile_time_enabled = jl_atomic_load_relaxed(&jl_measure_compile_time_enabled);
if (measure_compile_time_enabled)
compiler_start_time = jl_hrtime();
-JL_LOCK(&jl_codegen_lock);
+JL_SPIN_LOCK(&jl_codegen_lock);
auto target_info = m.withModuleDo([&](Module &M) {
return std::make_pair(M.getDataLayout(), Triple(M.getTargetTriple()));
});
@@ -2148,7 +2148,7 @@ void jl_get_llvmf_defn_impl(jl_llvmf_dump_t* dump, jl_method_instance_t *mi, siz
// max debug info = llvm.dbg.declare/value intrinsics which clutter IR output
output.debug_level = std::max(2, static_cast<int>(jl_options.debug_level));
auto decls = jl_emit_code(m, mi, src, jlrettype, output);
-JL_UNLOCK(&jl_codegen_lock); // Might GC
+JL_SPIN_UNLOCK(&jl_codegen_lock); // Might GC

Function *F = NULL;
if (m) {
6 changes: 3 additions & 3 deletions src/codegen.cpp
@@ -2851,18 +2851,18 @@ static jl_value_t *jl_ensure_rooted(jl_codectx_t &ctx, jl_value_t *val)
jl_method_t *m = ctx.linfo->def.method;
if (jl_is_method(m)) {
// the method might have a root for this already; use it if so
-JL_LOCK(&m->writelock);
+JL_SPIN_LOCK(&m->writelock);
if (m->roots) {
size_t i, len = jl_array_dim0(m->roots);
for (i = 0; i < len; i++) {
jl_value_t *mval = jl_array_ptr_ref(m->roots, i);
if (mval == val || jl_egal(mval, val)) {
-JL_UNLOCK(&m->writelock);
+JL_SPIN_UNLOCK(&m->writelock);
return mval;
}
}
}
-JL_UNLOCK(&m->writelock);
+JL_SPIN_UNLOCK(&m->writelock);
}
return jl_as_global_root(val);
}
8 changes: 4 additions & 4 deletions src/datatype.c
@@ -53,7 +53,7 @@ JL_DLLEXPORT jl_methtable_t *jl_new_method_table(jl_sym_t *name, jl_module_t *mo
jl_atomic_store_relaxed(&mt->cache, jl_nothing);
jl_atomic_store_relaxed(&mt->max_args, 0);
mt->backedges = NULL;
-JL_MUTEX_INIT(&mt->writelock, "methodtable->writelock");
+JL_SPIN_MUTEX_INIT(&mt->writelock, "methodtable->writelock");
mt->offs = 0;
mt->frozen = 0;
return mt;
@@ -632,7 +632,7 @@ void jl_compute_field_offsets(jl_datatype_t *st)
sz += fsz;
}
if (needlock) {
-size_t offset = LLT_ALIGN(sizeof(jl_mutex_t), alignm);
+size_t offset = LLT_ALIGN(sizeof(jl_spin_mutex_t), alignm);
for (i = 0; i < nfields; i++) {
desc[i].offset += offset;
}
@@ -1527,12 +1527,12 @@ JL_DLLEXPORT jl_value_t *jl_new_struct_uninit(jl_datatype_t *type)

JL_DLLEXPORT void jl_lock_value(jl_value_t *v) JL_NOTSAFEPOINT
{
-JL_LOCK_NOGC((jl_mutex_t*)v);
+JL_SPIN_LOCK_NOGC((jl_spin_mutex_t*)v);
}

JL_DLLEXPORT void jl_unlock_value(jl_value_t *v) JL_NOTSAFEPOINT
{
-JL_UNLOCK_NOGC((jl_mutex_t*)v);
+JL_SPIN_UNLOCK_NOGC((jl_spin_mutex_t*)v);
}

JL_DLLEXPORT int jl_field_index(jl_datatype_t *t, jl_sym_t *fld, int err)
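(Reviewer note, not part of the patch: a sketch of the object layout the two hunks above rely on. `LLT_ALIGN` and the offset shift mirror `jl_compute_field_offsets`; the stand-in types and the helper are illustrative only.)

```c
#include <stddef.h>

/* Illustrative stand-ins; the real definitions live in the runtime headers. */
typedef struct { unsigned long owner; unsigned int count; } jl_spin_mutex_t;
#define LLT_ALIGN(x, sz) (((x) + (sz) - 1) & ~((size_t)(sz) - 1))

/* When a datatype needs a per-instance lock, every field offset is shifted
 * past an aligned jl_spin_mutex_t placed at the front of the instance data,
 * which is why jl_lock_value/jl_unlock_value can cast the value pointer
 * directly to jl_spin_mutex_t*:
 *     JL_SPIN_LOCK_NOGC((jl_spin_mutex_t*)v);
 *     ...
 *     JL_SPIN_UNLOCK_NOGC((jl_spin_mutex_t*)v);
 */
static size_t field_offset_with_lock(size_t plain_offset, size_t alignm)
{
    return plain_offset + LLT_ALIGN(sizeof(jl_spin_mutex_t), alignm);
}
```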
6 changes: 3 additions & 3 deletions src/gc-heap-snapshot.cpp
@@ -121,7 +121,7 @@ struct HeapSnapshot {
// when snapshotting is on.
int gc_heap_snapshot_enabled = 0;
HeapSnapshot *g_snapshot = nullptr;
-extern jl_mutex_t heapsnapshot_lock;
+extern jl_spin_mutex_t heapsnapshot_lock;

void serialize_heap_snapshot(ios_t *stream, HeapSnapshot &snapshot, char all_one);
static inline void _record_gc_edge(const char *edge_type,
@@ -135,7 +135,7 @@ JL_DLLEXPORT void jl_gc_take_heap_snapshot(ios_t *stream, char all_one)
HeapSnapshot snapshot;
_add_internal_root(&snapshot);

-jl_mutex_lock(&heapsnapshot_lock);
+JL_SPIN_LOCK(&heapsnapshot_lock);

// Enable snapshotting
g_snapshot = &snapshot;
@@ -148,7 +148,7 @@ JL_DLLEXPORT void jl_gc_take_heap_snapshot(ios_t *stream, char all_one)
gc_heap_snapshot_enabled = false;
g_snapshot = nullptr;

-jl_mutex_unlock(&heapsnapshot_lock);
+JL_SPIN_UNLOCK(&heapsnapshot_lock);

// When we return, the snapshot is full
// Dump the snapshot
30 changes: 15 additions & 15 deletions src/gc.c
@@ -143,11 +143,11 @@ JL_DLLEXPORT void jl_gc_set_cb_notify_gc_pressure(jl_gc_cb_notify_gc_pressure_t
// For accessing `ptls->finalizers`, the lock is needed if a thread
// is going to realloc the buffer (of its own list) or accessing the
// list of another thread
-static jl_mutex_t finalizers_lock;
+static jl_spin_mutex_t finalizers_lock;
static uv_mutex_t gc_cache_lock;

// mutex for gc-heap-snapshot.
-jl_mutex_t heapsnapshot_lock;
+jl_spin_mutex_t heapsnapshot_lock;

// Flag that tells us whether we need to support conservative marking
// of objects.
@@ -401,7 +401,7 @@ static void jl_gc_run_finalizers_in_list(jl_task_t *ct, arraylist_t *list) JL_NO
jl_gc_push_arraylist(ct, list);
void **items = list->items;
size_t len = list->len;
-JL_UNLOCK_NOGC(&finalizers_lock);
+JL_SPIN_UNLOCK_NOGC(&finalizers_lock);
// run finalizers in reverse order they were added, so lower-level finalizers run last
for (size_t i = len-4; i >= 2; i -= 2)
run_finalizer(ct, items[i], items[i + 1]);
@@ -430,9 +430,9 @@ static void run_finalizers(jl_task_t *ct)
// will flush it.
if (to_finalize.len == 0)
return;
-JL_LOCK_NOGC(&finalizers_lock);
+JL_SPIN_LOCK_NOGC(&finalizers_lock);
if (to_finalize.len == 0) {
-JL_UNLOCK_NOGC(&finalizers_lock);
+JL_SPIN_UNLOCK_NOGC(&finalizers_lock);
return;
}
arraylist_t copied_list;
@@ -545,15 +545,15 @@ void jl_gc_run_all_finalizers(jl_task_t *ct)
gc_all_tls_states = jl_atomic_load_relaxed(&jl_all_tls_states);
// this is called from `jl_atexit_hook`; threads could still be running
// so we have to guard the finalizers' lists
-JL_LOCK_NOGC(&finalizers_lock);
+JL_SPIN_LOCK_NOGC(&finalizers_lock);
schedule_all_finalizers(&finalizer_list_marked);
for (int i = 0; i < gc_n_threads; i++) {
jl_ptls_t ptls2 = gc_all_tls_states[i];
if (ptls2 != NULL)
schedule_all_finalizers(&ptls2->finalizers);
}
// unlock here because `run_finalizers` locks this
-JL_UNLOCK_NOGC(&finalizers_lock);
+JL_SPIN_UNLOCK_NOGC(&finalizers_lock);
gc_n_threads = 0;
gc_all_tls_states = NULL;
run_finalizers(ct);
@@ -572,14 +572,14 @@ void jl_gc_add_finalizer_(jl_ptls_t ptls, void *v, void *f) JL_NOTSAFEPOINT
// between the acquire and the release of the length.
size_t oldlen = jl_atomic_load_acquire((_Atomic(size_t)*)&a->len);
if (__unlikely(oldlen + 2 > a->max)) {
-JL_LOCK_NOGC(&finalizers_lock);
+JL_SPIN_LOCK_NOGC(&finalizers_lock);
// `a->len` might have been modified.
// Another possibility is to always grow the array to `oldlen + 2` but
// it's simpler this way and uses slightly less memory =)
oldlen = a->len;
arraylist_grow(a, 2);
a->len = oldlen;
-JL_UNLOCK_NOGC(&finalizers_lock);
+JL_SPIN_UNLOCK_NOGC(&finalizers_lock);
}
void **items = a->items;
items[oldlen] = v;
@@ -611,7 +611,7 @@ JL_DLLEXPORT void jl_gc_add_finalizer_th(jl_ptls_t ptls, jl_value_t *v, jl_funct

JL_DLLEXPORT void jl_finalize_th(jl_task_t *ct, jl_value_t *o)
{
-JL_LOCK_NOGC(&finalizers_lock);
+JL_SPIN_LOCK_NOGC(&finalizers_lock);
// Copy the finalizers into a temporary list so that code in the finalizer
// won't change the list as we loop through them.
// This list is also used as the GC frame when we are running the finalizers
@@ -636,7 +636,7 @@ JL_DLLEXPORT void jl_finalize_th(jl_task_t *ct, jl_value_t *o)
jl_gc_run_finalizers_in_list(ct, &copied_list);
}
else {
-JL_UNLOCK_NOGC(&finalizers_lock);
+JL_SPIN_UNLOCK_NOGC(&finalizers_lock);
}
arraylist_free(&copied_list);
}
@@ -3491,7 +3491,7 @@ JL_DLLEXPORT void jl_gc_collect(jl_gc_collection_t collection)
gc_cblist_pre_gc, (collection));

if (!jl_atomic_load_acquire(&jl_gc_disable_counter)) {
-JL_LOCK_NOGC(&finalizers_lock); // all the other threads are stopped, so this does not make sense, right? otherwise, failing that, this seems like plausibly a deadlock
+JL_SPIN_LOCK_NOGC(&finalizers_lock); // all the other threads are stopped, so this does not make sense, right? otherwise, failing that, this seems like plausibly a deadlock
#ifndef __clang_gcanalyzer__
if (_jl_gc_collect(ptls, collection)) {
// recollect
@@ -3500,7 +3500,7 @@ JL_DLLEXPORT void jl_gc_collect(jl_gc_collection_t collection)
assert(!ret);
}
#endif
-JL_UNLOCK_NOGC(&finalizers_lock);
+JL_SPIN_UNLOCK_NOGC(&finalizers_lock);
}

gc_n_threads = 0;
@@ -3596,8 +3596,8 @@ void jl_init_thread_heap(jl_ptls_t ptls)
// System-wide initializations
void jl_gc_init(void)
{
-JL_MUTEX_INIT(&heapsnapshot_lock, "heapsnapshot_lock");
-JL_MUTEX_INIT(&finalizers_lock, "finalizers_lock");
+JL_SPIN_MUTEX_INIT(&heapsnapshot_lock, "heapsnapshot_lock");
+JL_SPIN_MUTEX_INIT(&finalizers_lock, "finalizers_lock");
uv_mutex_init(&gc_cache_lock);
uv_mutex_init(&gc_perm_lock);
uv_mutex_init(&gc_threads_lock);
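(Reviewer note, not part of the patch: a condensed sketch of the grow-under-lock pattern in the `jl_gc_add_finalizer_` hunk above, assuming `arraylist_t`, `arraylist_grow`, and the atomic length accessors behave as in that hunk. The helper name is hypothetical, and the trailing release store is implied by the hunk's comment about "the acquire and the release of the length".)

```c
// Another thread reading this list takes finalizers_lock; the owning thread
// only needs it when the push would reallocate the backing buffer.
static void example_push_finalizer(arraylist_t *a, void *v, void *f)
{
    size_t oldlen = jl_atomic_load_acquire((_Atomic(size_t)*)&a->len);
    if (oldlen + 2 > a->max) {
        JL_SPIN_LOCK_NOGC(&finalizers_lock);
        oldlen = a->len;        // may have changed since the load above
        arraylist_grow(a, 2);   // reallocate the buffer under the lock
        a->len = oldlen;        // keep the visible length unchanged for now
        JL_SPIN_UNLOCK_NOGC(&finalizers_lock);
    }
    void **items = a->items;
    items[oldlen] = v;
    items[oldlen + 1] = f;
    // publish both new entries with a release store of the length
    jl_atomic_store_release((_Atomic(size_t)*)&a->len, oldlen + 2);
}
```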