Skip to content

Commit d65909f

Browse files
committed
Use int32_t for sequence lock
Use atomic storage instead of exchange Use relaxed load when starting sequence lock for write Fix some formatting
1 parent 6d34906 commit d65909f

File tree

7 files changed

+50
-24
lines changed

7 files changed

+50
-24
lines changed

Include/cpython/pyatomic.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -469,6 +469,9 @@ _Py_atomic_store_int_release(int *obj, int value);
469469
static inline int
470470
_Py_atomic_load_int_acquire(const int *obj);
471471

472+
static inline uint32_t
473+
_Py_atomic_load_uint32_acquire(const uint32_t *obj);
474+
472475

473476
// --- _Py_atomic_fence ------------------------------------------------------
474477

Include/cpython/pyatomic_gcc.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -495,6 +495,9 @@ static inline int
495495
_Py_atomic_load_int_acquire(const int *obj)
496496
{ return __atomic_load_n(obj, __ATOMIC_ACQUIRE); }
497497

498+
static inline uint32_t
499+
_Py_atomic_load_uint32_acquire(const uint32_t *obj)
500+
{ return __atomic_load_n(obj, __ATOMIC_ACQUIRE); }
498501

499502
// --- _Py_atomic_fence ------------------------------------------------------
500503

Include/cpython/pyatomic_msc.h

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -938,6 +938,17 @@ _Py_atomic_load_int_acquire(const int *obj)
938938
#endif
939939
}
940940

941+
// Acquire-load a 32-bit unsigned value. MSVC has no generic atomic
// builtins, so each supported architecture gets its own implementation.
static inline uint32_t
_Py_atomic_load_uint32_acquire(const uint32_t *obj)
{
#if defined(_M_X64) || defined(_M_IX86)
    // x86/x64: an aligned 32-bit load already has acquire semantics;
    // 'volatile' only prevents the compiler from reordering or eliding it.
    return *(uint32_t volatile *)obj;
#elif defined(_M_ARM64)
    // ARM64: use the load-acquire intrinsic. Cast the result to the
    // function's return type, uint32_t -- the previous (int) cast was a
    // spurious signed conversion inconsistent with the declared type.
    return (uint32_t)__ldar32((uint32_t volatile *)obj);
#else
# error "no implementation of _Py_atomic_load_uint32_acquire"
#endif
}
941952

942953
// --- _Py_atomic_fence ------------------------------------------------------
943954

Include/cpython/pyatomic_std.h

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -870,6 +870,13 @@ _Py_atomic_load_int_acquire(const int *obj)
870870
memory_order_acquire);
871871
}
872872

873+
static inline uint32_t
874+
_Py_atomic_load_uint32_acquire(const uint32_t *obj)
875+
{
876+
_Py_USING_STD;
877+
return atomic_load_explicit((const _Atomic(uint32_t)*)obj,
878+
memory_order_acquire);
879+
}
873880

874881

875882
// --- _Py_atomic_fence ------------------------------------------------------

Include/internal/pycore_lock.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -261,7 +261,7 @@ PyAPI_FUNC(void) _PyRWMutex_Unlock(_PyRWMutex *rwmutex);
261261
// The writer can also detect that the underlying data has not changed and abandon the write
262262
// and restore the previous sequence.
263263
typedef struct {
264-
int sequence;
264+
uint32_t sequence;
265265
} _PySeqLock;
266266

267267
// Lock the sequence lock for the writer
@@ -275,15 +275,15 @@ PyAPI_FUNC(void) _PySeqLock_UnlockWrite(_PySeqLock *seqlock);
275275
PyAPI_FUNC(void) _PySeqLock_AbandonWrite(_PySeqLock *seqlock);
276276

277277
// Begin a read operation and return the current sequence number.
278-
PyAPI_FUNC(int) _PySeqLock_BeginRead(_PySeqLock *seqlock);
278+
PyAPI_FUNC(uint32_t) _PySeqLock_BeginRead(_PySeqLock *seqlock);
279279

280280
// End the read operation and confirm that the sequence number has not changed.
281281
// Returns 1 if the read was successful or 0 if the read should be re-tried.
282-
PyAPI_FUNC(int) _PySeqLock_EndRead(_PySeqLock *seqlock, int previous);
282+
PyAPI_FUNC(uint32_t) _PySeqLock_EndRead(_PySeqLock *seqlock, uint32_t previous);
283283

284284
// Check if the lock was held during a fork and clear the lock. Returns 1
285285
// if the lock was held and any associated data should be cleared.
286-
PyAPI_FUNC(int) _PySeqLock_AfterFork(_PySeqLock *seqlock);
286+
PyAPI_FUNC(uint32_t) _PySeqLock_AfterFork(_PySeqLock *seqlock);
287287

288288
#ifdef __cplusplus
289289
}

Objects/typeobject.c

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -59,13 +59,13 @@ class object "PyObject *" "&PyBaseObject_Type"
5959
// in odd behaviors w.r.t. running with the GIL as the outer type lock could
6060
// be released and reacquired during a subclass update if there's contention
6161
// on the subclass lock.
62-
#define BEGIN_TYPE_LOCK() \
63-
{ \
64-
_PyCriticalSection _cs; \
65-
_PyCriticalSection_Begin(&_cs, &_PyRuntime.types.type_mutex); \
62+
#define BEGIN_TYPE_LOCK() \
63+
{ \
64+
_PyCriticalSection _cs; \
65+
_PyCriticalSection_Begin(&_cs, &_PyRuntime.types.type_mutex); \
6666

67-
#define END_TYPE_LOCK() \
68-
_PyCriticalSection_End(&_cs); \
67+
#define END_TYPE_LOCK() \
68+
_PyCriticalSection_End(&_cs); \
6969
}
7070

7171
#define ASSERT_TYPE_LOCK_HELD() \

Python/lock.c

Lines changed: 16 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -465,59 +465,61 @@ _PyRWMutex_Unlock(_PyRWMutex *rwmutex)
465465
void _PySeqLock_LockWrite(_PySeqLock *seqlock)
466466
{
467467
// lock the entry by moving to an odd sequence number
468-
int prev = seqlock->sequence;
468+
uint32_t prev = _Py_atomic_load_uint32_relaxed(&seqlock->sequence);
469469
while (1) {
470470
if (SEQLOCK_IS_UPDATING(prev)) {
471471
// Someone else is currently updating the cache
472472
_Py_yield();
473-
prev = _Py_atomic_load_int32_relaxed(&seqlock->sequence);
474-
} else if (_Py_atomic_compare_exchange_int32(&seqlock->sequence, &prev, prev + 1)) {
473+
prev = _Py_atomic_load_uint32_relaxed(&seqlock->sequence);
474+
}
475+
else if (_Py_atomic_compare_exchange_uint32(&seqlock->sequence, &prev, prev + 1)) {
475476
// We've locked the cache
476477
break;
477-
} else {
478+
}
479+
else {
478480
_Py_yield();
479481
}
480482
}
481483
}
482484

483485
void _PySeqLock_AbandonWrite(_PySeqLock *seqlock)
484486
{
485-
int new_seq = seqlock->sequence - 1;
487+
uint32_t new_seq = seqlock->sequence - 1;
486488
assert(!SEQLOCK_IS_UPDATING(new_seq));
487-
_Py_atomic_exchange_int32(&seqlock->sequence, new_seq);
489+
_Py_atomic_store_uint32(&seqlock->sequence, new_seq);
488490
}
489491

490492
void _PySeqLock_UnlockWrite(_PySeqLock *seqlock)
491493
{
492-
int new_seq = seqlock->sequence + 1;
494+
uint32_t new_seq = seqlock->sequence + 1;
493495
assert(!SEQLOCK_IS_UPDATING(new_seq));
494-
_Py_atomic_exchange_int32(&seqlock->sequence, new_seq);
496+
_Py_atomic_store_uint32(&seqlock->sequence, new_seq);
495497
}
496498

497-
int _PySeqLock_BeginRead(_PySeqLock *seqlock)
499+
uint32_t _PySeqLock_BeginRead(_PySeqLock *seqlock)
498500
{
499-
int sequence = _Py_atomic_load_int_acquire(&seqlock->sequence);
501+
uint32_t sequence = _Py_atomic_load_uint32_acquire(&seqlock->sequence);
500502
while (SEQLOCK_IS_UPDATING(sequence)) {
501503
_Py_yield();
502-
sequence = _Py_atomic_load_int_acquire(&seqlock->sequence);
504+
sequence = _Py_atomic_load_uint32_acquire(&seqlock->sequence);
503505
}
504506

505507
return sequence;
506508
}
507509

508-
int _PySeqLock_EndRead(_PySeqLock *seqlock, int previous)
510+
uint32_t _PySeqLock_EndRead(_PySeqLock *seqlock, uint32_t previous)
509511
{
510512
// Synchronize again and validate that the entry hasn't been updated
511513
// while we were reading the values.
512-
if (_Py_atomic_load_int_acquire(&seqlock->sequence) == previous) {
514+
if (_Py_atomic_load_uint32_acquire(&seqlock->sequence) == previous) {
513515
return 1;
514516
}
515517

516518
_Py_yield();
517519
return 0;
518520
}
519521

520-
int _PySeqLock_AfterFork(_PySeqLock *seqlock)
522+
uint32_t _PySeqLock_AfterFork(_PySeqLock *seqlock)
521523
{
522524
// Synchronize again and validate that the entry hasn't been updated
523525
// while we were reading the values.

0 commit comments

Comments
 (0)