@@ -295,7 +295,6 @@ const char *jl_generate_ccallable(LLVMOrcThreadSafeModuleRef llvmmod, void *sysi
295
295
extern "C" JL_DLLEXPORT
296
296
int jl_compile_extern_c_impl (LLVMOrcThreadSafeModuleRef llvmmod, void *p, void *sysimg, jl_value_t *declrt, jl_value_t *sigt)
297
297
{
298
- JL_LOCK (&jl_codegen_lock);
299
298
auto ct = jl_current_task;
300
299
ct->reentrant_codegen ++;
301
300
uint64_t compiler_start_time = 0 ;
@@ -313,6 +312,7 @@ int jl_compile_extern_c_impl(LLVMOrcThreadSafeModuleRef llvmmod, void *p, void *
313
312
backing = jl_create_llvm_module ("cextern", pparams ? pparams->tsctx : ctx, pparams ? pparams->imaging : imaging_default ());
314
313
into = &backing;
315
314
}
315
+ JL_LOCK (&jl_codegen_lock);
316
316
jl_codegen_params_t params (into->getContext ());
317
317
if (pparams == NULL )
318
318
pparams = &params;
@@ -332,13 +332,12 @@ int jl_compile_extern_c_impl(LLVMOrcThreadSafeModuleRef llvmmod, void *p, void *
332
332
if (success && llvmmod == NULL )
333
333
jl_ExecutionEngine->addModule (std::move (*into));
334
334
}
335
- if (ct->reentrant_codegen == 1 && measure_compile_time_enabled)
335
+ JL_UNLOCK (&jl_codegen_lock);
336
+ if (!--ct->reentrant_codegen && measure_compile_time_enabled)
336
337
jl_atomic_fetch_add_relaxed (&jl_cumulative_compile_time, (jl_hrtime () - compiler_start_time));
337
338
if (ctx.getContext ()) {
338
339
jl_ExecutionEngine->releaseContext (std::move (ctx));
339
340
}
340
- ct->reentrant_codegen --;
341
- JL_UNLOCK (&jl_codegen_lock);
342
341
return success;
343
342
}
344
343
@@ -389,7 +388,6 @@ void jl_extern_c_impl(jl_value_t *declrt, jl_tupletype_t *sigt)
389
388
extern "C" JL_DLLEXPORT
390
389
jl_code_instance_t *jl_generate_fptr_impl (jl_method_instance_t *mi JL_PROPAGATES_ROOT, size_t world)
391
390
{
392
- JL_LOCK (&jl_codegen_lock); // also disables finalizers, to prevent any unexpected recursion
393
391
auto ct = jl_current_task;
394
392
ct->reentrant_codegen ++;
395
393
uint64_t compiler_start_time = 0 ;
@@ -400,6 +398,7 @@ jl_code_instance_t *jl_generate_fptr_impl(jl_method_instance_t *mi JL_PROPAGATES
400
398
// if we don't have any decls already, try to generate it now
401
399
jl_code_info_t *src = NULL ;
402
400
JL_GC_PUSH1 (&src);
401
+ JL_LOCK (&jl_codegen_lock); // also disables finalizers, to prevent any unexpected recursion
403
402
jl_value_t *ci = jl_rettype_inferred (mi, world, world);
404
403
jl_code_instance_t *codeinst = (ci == jl_nothing ? NULL : (jl_code_instance_t *)ci);
405
404
if (codeinst) {
@@ -442,14 +441,13 @@ jl_code_instance_t *jl_generate_fptr_impl(jl_method_instance_t *mi JL_PROPAGATES
442
441
else {
443
442
codeinst = NULL ;
444
443
}
445
- if (ct->reentrant_codegen == 1 && measure_compile_time_enabled) {
444
+ JL_UNLOCK (&jl_codegen_lock);
445
+ if (!--ct->reentrant_codegen && measure_compile_time_enabled) {
446
446
uint64_t t_comp = jl_hrtime () - compiler_start_time;
447
447
if (is_recompile)
448
448
jl_atomic_fetch_add_relaxed (&jl_cumulative_recompile_time, t_comp);
449
449
jl_atomic_fetch_add_relaxed (&jl_cumulative_compile_time, t_comp);
450
450
}
451
- ct->reentrant_codegen --;
452
- JL_UNLOCK (&jl_codegen_lock);
453
451
JL_GC_POP ();
454
452
return codeinst;
455
453
}
@@ -460,13 +458,13 @@ void jl_generate_fptr_for_unspecialized_impl(jl_code_instance_t *unspec)
460
458
if (jl_atomic_load_relaxed (&unspec->invoke ) != NULL ) {
461
459
return ;
462
460
}
463
- JL_LOCK (&jl_codegen_lock);
464
461
auto ct = jl_current_task;
465
462
ct->reentrant_codegen ++;
466
463
uint64_t compiler_start_time = 0 ;
467
464
uint8_t measure_compile_time_enabled = jl_atomic_load_relaxed (&jl_measure_compile_time_enabled);
468
465
if (measure_compile_time_enabled)
469
466
compiler_start_time = jl_hrtime ();
467
+ JL_LOCK (&jl_codegen_lock);
470
468
if (jl_atomic_load_relaxed (&unspec->invoke ) == NULL ) {
471
469
jl_code_info_t *src = NULL ;
472
470
JL_GC_PUSH1 (&src);
@@ -494,10 +492,9 @@ void jl_generate_fptr_for_unspecialized_impl(jl_code_instance_t *unspec)
494
492
}
495
493
JL_GC_POP ();
496
494
}
497
- if (ct->reentrant_codegen == 1 && measure_compile_time_enabled)
498
- jl_atomic_fetch_add_relaxed (&jl_cumulative_compile_time, (jl_hrtime () - compiler_start_time));
499
- ct->reentrant_codegen --;
500
495
JL_UNLOCK (&jl_codegen_lock); // Might GC
496
+ if (!--ct->reentrant_codegen && measure_compile_time_enabled)
497
+ jl_atomic_fetch_add_relaxed (&jl_cumulative_compile_time, (jl_hrtime () - compiler_start_time));
501
498
}
502
499
503
500
@@ -517,13 +514,13 @@ jl_value_t *jl_dump_method_asm_impl(jl_method_instance_t *mi, size_t world,
517
514
// normally we prevent native code from being generated for these functions,
518
515
// (using sentinel value `1` instead)
519
516
// so create an exception here so we can print pretty our lies
520
- JL_LOCK (&jl_codegen_lock); // also disables finalizers, to prevent any unexpected recursion
521
517
auto ct = jl_current_task;
522
- ct->reentrant_codegen -- ;
518
+ ct->reentrant_codegen ++ ;
523
519
uint64_t compiler_start_time = 0 ;
524
520
uint8_t measure_compile_time_enabled = jl_atomic_load_relaxed (&jl_measure_compile_time_enabled);
525
521
if (measure_compile_time_enabled)
526
522
compiler_start_time = jl_hrtime ();
523
+ JL_LOCK (&jl_codegen_lock); // also disables finalizers, to prevent any unexpected recursion
527
524
specfptr = (uintptr_t )jl_atomic_load_relaxed (&codeinst->specptr .fptr );
528
525
if (specfptr == 0 ) {
529
526
jl_code_info_t *src = jl_type_infer (mi, world, 0 );
@@ -547,9 +544,8 @@ jl_value_t *jl_dump_method_asm_impl(jl_method_instance_t *mi, size_t world,
547
544
}
548
545
JL_GC_POP ();
549
546
}
550
- if (ct->reentrant_codegen == 1 && measure_compile_time_enabled)
547
+ if (!--ct->reentrant_codegen && measure_compile_time_enabled) {
551
548
jl_atomic_fetch_add_relaxed (&jl_cumulative_compile_time, (jl_hrtime () - compiler_start_time));
552
- ct->reentrant_codegen --;
553
549
JL_UNLOCK (&jl_codegen_lock);
554
550
}
555
551
if (specfptr != 0 )
0 commit comments