@@ -591,6 +591,7 @@ JL_DLLEXPORT const int jl_tls_elf_support = 0;
 
 extern int gc_first_tid;
 
+#define SLEEP_LOCK_BIT ((uint32_t)(1ull << 31))
 #define SLEEP_HASH_BITS 6
 #define SLEEP_IGNORED_BITS 8
 static uv_mutex_t sleep_locks[1 << SLEEP_HASH_BITS];
@@ -767,21 +768,25 @@ JL_DLLEXPORT void jl_exit_threaded_region(void)
     }
 }
 
+static int is_spin_mutex(void *lock)
+{
+    return (jl_atomic_load_relaxed(&((jl_spin_mutex_t*)lock)->count) & SLEEP_LOCK_BIT) == 0;
+}
+
 JL_DLLEXPORT void _jl_spin_mutex_init(jl_spin_mutex_t *lock, const char *name) JL_NOTSAFEPOINT
 {
-    lock->count = 0;
-    // high bit of padding identifies this as a spin lock
-    jl_atomic_store_relaxed(&lock->padding, 0);
+    // high bit of count unset identifies this as a spin lock
+    jl_atomic_store_relaxed(&lock->count, 0);
     jl_atomic_store_release(&lock->owner, (jl_task_t*)NULL);
     jl_profile_lock_init(lock, name);
 }
 
 JL_DLLEXPORT void _jl_spin_mutex_wait(jl_task_t *self, jl_spin_mutex_t *lock, int safepoint)
 {
-    assert(jl_atomic_load_relaxed(&lock->padding) == 0 && "Spin lock not initialized!");
+    assert(is_spin_mutex(lock) && "Spin lock corrupted!");
     jl_task_t *owner = jl_atomic_load_relaxed(&lock->owner);
     if (owner == self) {
-        lock->count++;
+        jl_atomic_store_relaxed(&lock->count, jl_atomic_load_relaxed(&lock->count) + 1);
         return;
     }
     // Don't use JL_TIMING for instant acquires, results in large blowup of events
@@ -794,7 +799,7 @@ JL_DLLEXPORT void _jl_spin_mutex_wait(jl_task_t *self, jl_spin_mutex_t *lock, in
     JL_TIMING(LOCK_SPIN, LOCK_SPIN);
     while (1) {
         if (owner == NULL && jl_atomic_cmpswap(&lock->owner, &owner, self)) {
-            lock->count = 1;
+            jl_atomic_store_relaxed(&lock->count, 1);
             jl_profile_lock_acquired(lock);
             return;
         }
@@ -843,14 +848,14 @@ JL_DLLEXPORT void _jl_spin_mutex_lock(jl_task_t *self, jl_spin_mutex_t *lock)
 
 JL_DLLEXPORT int _jl_spin_mutex_trylock_nogc(jl_task_t *self, jl_spin_mutex_t *lock)
 {
-    assert(jl_atomic_load_relaxed(&lock->padding) == 0 && "Spin lock not initialized!");
+    assert(is_spin_mutex(lock) && "Spin lock corrupted!");
     jl_task_t *owner = jl_atomic_load_acquire(&lock->owner);
     if (owner == self) {
-        lock->count++;
+        jl_atomic_store_relaxed(&lock->count, jl_atomic_load_relaxed(&lock->count) + 1);
         return 1;
     }
     if (owner == NULL && jl_atomic_cmpswap(&lock->owner, &owner, self)) {
-        lock->count = 1;
+        jl_atomic_store_relaxed(&lock->count, 1);
         return 1;
     }
     return 0;
@@ -868,11 +873,13 @@ JL_DLLEXPORT int _jl_spin_mutex_trylock(jl_task_t *self, jl_spin_mutex_t *lock)
 
 JL_DLLEXPORT void _jl_spin_mutex_unlock_nogc(jl_spin_mutex_t *lock)
 {
-    assert(jl_atomic_load_relaxed(&lock->padding) == 0 && "Spin lock not initialized!");
+    assert(is_spin_mutex(lock) && "Spin lock corrupted!");
 #ifndef __clang_gcanalyzer__
     assert(jl_atomic_load_relaxed(&lock->owner) == jl_current_task &&
            "Unlocking a lock in a different thread.");
-    if (--lock->count == 0) {
+    uint32_t count = jl_atomic_load_relaxed(&lock->count);
+    jl_atomic_store_relaxed(&lock->count, count - 1);
+    if (count == 1) {
         jl_profile_lock_release_start(lock);
         jl_atomic_store_release(&lock->owner, (jl_task_t*)NULL);
         jl_cpu_wake();
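
The unlock path above now decrements count with an explicit relaxed load followed by a relaxed store instead of --lock->count. To make the reasoning easier to follow, here is a minimal stand-alone sketch of the same recursive spin-lock pattern in plain C11 atomics (the demo_* names are invented for illustration; the real code uses the jl_atomic_* wrappers, services GC safepoints while spinning, and records lock profiling events). The recursion depth is only ever touched by the task that currently owns the lock, so relaxed ordering is enough for count; the acquire CAS and the final release store on owner provide the actual synchronization.

    #include <assert.h>
    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef struct {
        _Atomic(void *) owner;   /* owning task, NULL when the lock is free */
        _Atomic uint32_t count;  /* recursion depth, accessed only by the owner */
    } demo_spin_mutex_t;

    static void demo_spin_lock(demo_spin_mutex_t *m, void *self)
    {
        if (atomic_load_explicit(&m->owner, memory_order_relaxed) == self) {
            /* recursive acquire: only the owner reaches here, relaxed is enough */
            uint32_t c = atomic_load_explicit(&m->count, memory_order_relaxed);
            atomic_store_explicit(&m->count, c + 1, memory_order_relaxed);
            return;
        }
        for (;;) {
            void *expected = NULL;
            if (atomic_compare_exchange_weak_explicit(&m->owner, &expected, self,
                    memory_order_acquire, memory_order_relaxed)) {
                atomic_store_explicit(&m->count, 1, memory_order_relaxed);
                return;
            }
            /* spin; the real lock also runs safepoints and may pause the CPU here */
        }
    }

    static void demo_spin_unlock(demo_spin_mutex_t *m, void *self)
    {
        assert(atomic_load_explicit(&m->owner, memory_order_relaxed) == self);
        uint32_t c = atomic_load_explicit(&m->count, memory_order_relaxed);
        atomic_store_explicit(&m->count, c - 1, memory_order_relaxed);
        if (c == 1) {
            /* depth reached zero: the release store publishes the critical section */
            atomic_store_explicit(&m->owner, NULL, memory_order_release);
        }
    }

Note that the sketched unlock, like the patched _jl_spin_mutex_unlock_nogc, compares the value read before the decrement against 1 rather than re-reading count after the store.
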
@@ -897,8 +904,6 @@ JL_DLLEXPORT void _jl_spin_mutex_unlock(jl_task_t *self, jl_spin_mutex_t *lock)
     }
 }
 
-#define SLEEP_LOCK_BIT ((uint32_t)(1ull << 31))
-
 JL_DLLEXPORT void _jl_sleep_mutex_init(jl_sleep_mutex_t *lock, const char *name) JL_NOTSAFEPOINT
 {
     // waiters is overloaded:
@@ -913,8 +918,10 @@ JL_DLLEXPORT void _jl_sleep_mutex_init(jl_sleep_mutex_t *lock, const char *name)
 
 JL_DLLEXPORT void _jl_sleep_mutex_wait(jl_task_t *self, jl_sleep_mutex_t *lock, int safepoint)
 {
-    // high bit of padding identifies this as a spin lock
-    assert((jl_atomic_load_relaxed(&lock->waiters) & SLEEP_LOCK_BIT) && "Unexpected spin lock in sleeping wait!");
+    // high bit of waiters identifies this as a sleep lock
+    // call out the uninitialized case specially
+    assert(jl_atomic_load_relaxed(&lock->waiters) != 0 && "Sleep lock not initialized!");
+    assert(!is_spin_mutex(lock) && "Unexpected spin lock in sleeping wait!");
     uint32_t old_waiters = SLEEP_LOCK_BIT;
     if (jl_atomic_cmpswap_acqrel(&lock->waiters, &old_waiters, SLEEP_LOCK_BIT | 1)) {
         // no one is waiting, we just took the lock
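
For context on the asserts being tightened here: the sleep mutex keeps its state in the single waiters word, whose high bit doubles as the sleep-lock tag, and the uncontended acquire is the one acq_rel compare-and-swap shown above, from "tag bit only" to SLEEP_LOCK_BIT | 1. That is also why a zero waiters word can only mean an uninitialized lock, the case the new assert calls out. A minimal sketch of just that fast path in C11 atomics follows (the demo_* type and names are invented; the contended path, which presumably parks the waiter using the hashed sleep_locks array, is omitted):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define SLEEP_LOCK_BIT ((uint32_t)(1ull << 31))

    typedef struct { _Atomic uint32_t waiters; } demo_sleep_mutex_t;

    static void demo_sleep_mutex_init(demo_sleep_mutex_t *m)
    {
        /* tag bit set, lock not held, nobody waiting */
        atomic_init(&m->waiters, SLEEP_LOCK_BIT);
    }

    /* Uncontended acquire: one acq_rel CAS from "tag only" to "tag | held". */
    static bool demo_sleep_mutex_try_fast(demo_sleep_mutex_t *m)
    {
        uint32_t expected = SLEEP_LOCK_BIT;
        return atomic_compare_exchange_strong_explicit(&m->waiters, &expected,
                   SLEEP_LOCK_BIT | 1, memory_order_acq_rel, memory_order_relaxed);
    }
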
@@ -966,8 +973,10 @@ JL_DLLEXPORT void _jl_sleep_mutex_lock(jl_task_t *self, jl_sleep_mutex_t *lock)
 
 JL_DLLEXPORT int _jl_sleep_mutex_trylock_nogc(jl_task_t *self, jl_sleep_mutex_t *lock)
 {
-    // high bit of padding identifies this as a spin lock
-    assert((jl_atomic_load_relaxed(&lock->waiters) & SLEEP_LOCK_BIT) && "Unexpected spin lock in sleeping trylock!");
+    // high bit of waiters identifies this as a sleep lock
+    // call out the uninitialized case specially
+    assert(jl_atomic_load_relaxed(&lock->waiters) != 0 && "Sleep lock not initialized!");
+    assert(!is_spin_mutex(lock) && "Unexpected spin lock in trylock!");
     uint32_t none = SLEEP_LOCK_BIT;
     if (jl_atomic_cmpswap_acqrel(&lock->waiters, &none, SLEEP_LOCK_BIT | 1)) {
         // no one is waiting, we just took the lock
@@ -995,8 +1004,10 @@ JL_DLLEXPORT int _jl_sleep_mutex_trylock(jl_task_t *self, jl_sleep_mutex_t *lock
 
 JL_DLLEXPORT void _jl_sleep_mutex_unlock_nogc(jl_sleep_mutex_t *lock)
 {
-    // high bit of padding identifies this as a spin lock
-    assert((jl_atomic_load_relaxed(&lock->waiters) & SLEEP_LOCK_BIT) && "Unexpected spin lock in sleeping unlock!");
+    // high bit of waiters identifies this as a sleep lock
+    // call out the uninitialized case specially
+    assert(jl_atomic_load_relaxed(&lock->waiters) != 0 && "Sleep lock not initialized!");
+    assert(!is_spin_mutex(lock) && "Unexpected spin lock in unlock!");
     if (--lock->count == 0) {
         //Do the release
         jl_profile_lock_release_start(lock);
@@ -1042,8 +1053,7 @@ JL_DLLEXPORT void _jl_sleep_mutex_unlock(jl_task_t *self, jl_sleep_mutex_t *lock
 
 void _jl_dyn_mutex_unlock(jl_task_t *self, void *lock)
 {
-    int is_sleep_mutex = jl_atomic_load_relaxed(&((jl_spin_mutex_t*)lock)->padding) & SLEEP_LOCK_BIT;
-    if (is_sleep_mutex) {
+    if (!is_spin_mutex(lock)) {
         _jl_sleep_mutex_unlock(self, (jl_sleep_mutex_t*)lock);
     } else {
         _jl_spin_mutex_unlock(self, (jl_spin_mutex_t*)lock);
@@ -1052,8 +1062,7 @@ void _jl_dyn_mutex_unlock(jl_task_t *self, void *lock)
 
 void _jl_dyn_mutex_unlock_nogc(void *lock)
 {
-    int is_sleep_mutex = jl_atomic_load_relaxed(&((jl_spin_mutex_t*)lock)->padding) & SLEEP_LOCK_BIT;
-    if (is_sleep_mutex) {
+    if (!is_spin_mutex(lock)) {
         _jl_sleep_mutex_unlock_nogc((jl_sleep_mutex_t*)lock);
     } else {
         _jl_spin_mutex_unlock_nogc((jl_spin_mutex_t*)lock);
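
Finally, the dynamic helpers above rely on is_spin_mutex being callable on a void * that may point at either lock type: spin locks keep the high bit of count clear, sleep locks keep the high bit of waiters set, and the two words are assumed to overlay the same location so the spin-lock view used by the cast reads the right word. A small self-contained sketch of that dispatch with invented demo_* types (in the demo the tag word is simply the first member of both structs, and the unlock bodies are stubbed out with prints):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SLEEP_LOCK_BIT ((uint32_t)(1ull << 31))

    typedef struct { _Atomic uint32_t count; }   demo_spin_mutex_t;  /* high bit clear */
    typedef struct { _Atomic uint32_t waiters; } demo_sleep_mutex_t; /* high bit set   */

    static int demo_is_spin_mutex(void *lock)
    {
        /* mirror the real check: read the tag word through the spin-lock view */
        uint32_t word = atomic_load_explicit(&((demo_spin_mutex_t *)lock)->count,
                                             memory_order_relaxed);
        return (word & SLEEP_LOCK_BIT) == 0;
    }

    static void demo_dyn_unlock(void *lock)
    {
        if (!demo_is_spin_mutex(lock))
            printf("sleep unlock of %p\n", lock);  /* would call the sleep-mutex unlock */
        else
            printf("spin unlock of %p\n", lock);   /* would call the spin-mutex unlock */
    }

    int main(void)
    {
        demo_spin_mutex_t s;  atomic_init(&s.count, 0);
        demo_sleep_mutex_t w; atomic_init(&w.waiters, SLEEP_LOCK_BIT);
        demo_dyn_unlock(&s);  /* prints "spin unlock ..." */
        demo_dyn_unlock(&w);  /* prints "sleep unlock ..." */
        return 0;
    }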