@@ -715,7 +715,7 @@ static void domain_create(uintnat initial_minor_heap_wsize,
   s->unique_id = fresh_domain_unique_id();
   domain_state->unique_id = s->unique_id;
   s->running = 1;
-  atomic_fetch_add(&caml_num_domains_running, 1);
+  (void)caml_atomic_counter_incr(&caml_num_domains_running);
 
   domain_state->c_stack = NULL;
   domain_state->exn_handler = NULL;
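The commit switches from raw `atomic_fetch_add` calls to a `caml_atomic_counter_*` helper API, but the helpers themselves are not shown in these hunks. As a rough sketch, assuming they are thin wrappers over C11 `<stdatomic.h>` operations on an integer counter, they might look like the following; the names mirror the diff, but the types and bodies here are illustrative assumptions, not the runtime's actual definitions.

/* Illustrative sketch only: an assumed shape for the caml_atomic_counter_*
   helpers used in this commit, modeled as thin wrappers over C11 atomics.
   The runtime's real definitions may differ in type and memory ordering. */
#include <stdatomic.h>
#include <stdint.h>

typedef intptr_t intnat;                     /* stand-in for the runtime's intnat */
typedef _Atomic intnat caml_atomic_counter;  /* assumed counter type */

static inline void caml_atomic_counter_init(caml_atomic_counter *c, intnat v)
{
  atomic_store(c, v);
}

/* Assumed to return the *new* value, which is what the == 0 test in
   decrement_stw_domains_still_processing below relies on. */
static inline intnat caml_atomic_counter_incr(caml_atomic_counter *c)
{
  return atomic_fetch_add(c, 1) + 1;
}

static inline intnat caml_atomic_counter_decr(caml_atomic_counter *c)
{
  return atomic_fetch_add(c, -1) - 1;
}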
@@ -1444,7 +1444,7 @@ static void decrement_stw_domains_still_processing(void)
      if so, clear the stw_leader to allow the new stw sections to start.
    */
   intnat am_last =
-    atomic_fetch_add(&stw_request.num_domains_still_processing, -1) == 1;
+    caml_atomic_counter_decr(&stw_request.num_domains_still_processing) == 0;
 
   if (am_last) {
     /* release the STW lock to allow new STW sections */
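Note the change in the comparison: `atomic_fetch_add(p, -1)` returns the value the counter held before the decrement, so the old code tested `== 1` to detect the last domain, whereas `caml_atomic_counter_decr` evidently returns the value after the decrement, hence the `== 0` test. Both forms mark the same domain as `am_last`.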
@@ -1662,8 +1662,8 @@ int caml_try_run_on_all_domains_with_spin_work(
   stw_request.data = data;
   stw_request.num_domains = stw_domains.participating_domains;
   /* stw_request.barrier doesn't need resetting */
-  atomic_store_release(&stw_request.num_domains_still_processing,
-                       stw_domains.participating_domains);
+  caml_atomic_counter_init(&stw_request.num_domains_still_processing,
+                           stw_domains.participating_domains);
 
   int is_alone = stw_request.num_domains == 1;
   int should_sync = sync && !is_alone;
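Here the counter's initialization also goes through the helper API instead of a raw `atomic_store_release`, so every access to `num_domains_still_processing` is now expressed in terms of `caml_atomic_counter_*`; presumably the helper's store is at least as strong as the release store it replaces.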
@@ -2144,7 +2144,7 @@ static void domain_terminate (void)
   /* This is the last thing we do because we need to be able to rely
      on caml_domain_alone (which uses caml_num_domains_running) in at least
      the shared_heap lockfree fast paths */
-  atomic_fetch_add(&caml_num_domains_running, -1);
+  (void)caml_atomic_counter_decr(&caml_num_domains_running);
 }
 
 CAMLprim value caml_ml_domain_cpu_relax(value t)
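To make the countdown pattern behind `decrement_stw_domains_still_processing` concrete, here is a small standalone C11 program (not OCaml runtime code) in which several threads each decrement a shared counter and only the thread that brings it to zero acts as the "last" participant, mirroring the `am_last` logic above.

/* Standalone illustration of the "last one out" countdown used for
   stw_request.num_domains_still_processing.  Plain C11 atomics and
   pthreads; no OCaml runtime headers involved. */
#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>

#define NUM_PARTICIPANTS 4

static atomic_long still_processing;

static void *participant(void *arg)
{
  long id = (long)arg;
  /* ... per-participant STW work would happen here ... */
  long am_last = atomic_fetch_add(&still_processing, -1) == 1;
  if (am_last)
    printf("participant %ld is last; releasing the barrier\n", id);
  return NULL;
}

int main(void)
{
  pthread_t threads[NUM_PARTICIPANTS];
  atomic_store(&still_processing, NUM_PARTICIPANTS);
  for (long i = 0; i < NUM_PARTICIPANTS; i++)
    pthread_create(&threads[i], NULL, participant, (void *)i);
  for (int i = 0; i < NUM_PARTICIPANTS; i++)
    pthread_join(threads[i], NULL);
  return 0;
}

Exactly one thread observes the pre-decrement value 1, so exactly one prints the "last" message, regardless of scheduling.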