Skip to content

Commit

Permalink
Replace ReentrantMutexLocker with ConditionalMutexLocker
Browse files Browse the repository at this point in the history
  • Loading branch information
shipilev committed Jul 28, 2023
1 parent 5962871 commit 4b14081
Show file tree
Hide file tree
Showing 10 changed files with 26 additions and 31 deletions.
1 change: 0 additions & 1 deletion src/hotspot/share/classfile/classLoader.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -929,7 +929,6 @@ void ClassLoader::load_java_library() {
}

void ClassLoader::release_load_zip_library() {
// Temporary workaround for JDK-8313210: Zip_lock can be null on some paths
ConditionalMutexLocker locker(Zip_lock, Zip_lock != nullptr, Monitor::_no_safepoint_check_flag);
if (_libzip_loaded == 0) {
load_zip_library();
Expand Down
2 changes: 1 addition & 1 deletion src/hotspot/share/code/compiledMethod.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -116,7 +116,7 @@ const char* CompiledMethod::state() const {

//-----------------------------------------------------------------------------
void CompiledMethod::set_deoptimized_done() {
ReentrantMutexLocker ml(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
if (_deoptimization_status != deoptimize_done) { // can't go backwards
Atomic::store(&_deoptimization_status, deoptimize_done);
}
Expand Down
2 changes: 1 addition & 1 deletion src/hotspot/share/code/nmethod.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1339,7 +1339,7 @@ bool nmethod::make_not_entrant() {

{
// Enter critical section. Does not block for safepoint.
ReentrantMutexLocker ml(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);

if (Atomic::load(&_state) == not_entrant) {
// another thread already performed this transition so nothing
Expand Down
6 changes: 3 additions & 3 deletions src/hotspot/share/oops/instanceKlass.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -3406,7 +3406,7 @@ void InstanceKlass::add_osr_nmethod(nmethod* n) {
// Remove osr nmethod from the list. Return true if found and removed.
bool InstanceKlass::remove_osr_nmethod(nmethod* n) {
// This is a short non-blocking critical region, so the no safepoint check is ok.
ReentrantMutexLocker ml(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
assert(n->is_osr_method(), "wrong kind of nmethod");
nmethod* last = nullptr;
nmethod* cur = osr_nmethods_head();
Expand Down Expand Up @@ -3447,7 +3447,7 @@ bool InstanceKlass::remove_osr_nmethod(nmethod* n) {
}

int InstanceKlass::mark_osr_nmethods(DeoptimizationScope* deopt_scope, const Method* m) {
ReentrantMutexLocker ml(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
nmethod* osr = osr_nmethods_head();
int found = 0;
while (osr != nullptr) {
Expand All @@ -3462,7 +3462,7 @@ int InstanceKlass::mark_osr_nmethods(DeoptimizationScope* deopt_scope, const Met
}

nmethod* InstanceKlass::lookup_osr_nmethod(const Method* m, int bci, int comp_level, bool match_level) const {
ReentrantMutexLocker ml(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
nmethod* osr = osr_nmethods_head();
nmethod* best = nullptr;
while (osr != nullptr) {
Expand Down
4 changes: 2 additions & 2 deletions src/hotspot/share/oops/method.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1148,7 +1148,7 @@ void Method::clear_code() {
}

void Method::unlink_code(CompiledMethod *compare) {
ReentrantMutexLocker ml(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
// We need to check if either the _code or _from_compiled_code_entry_point
// refer to this nmethod because there is a race in setting these two fields
// in Method* as seen in bugid 4947125.
Expand All @@ -1159,7 +1159,7 @@ void Method::unlink_code(CompiledMethod *compare) {
}

void Method::unlink_code() {
  // Detach all compiled code from this Method.
  // Lock conditionally: the caller may already hold CompiledMethod_lock, and
  // VM internal Mutexes do not support recursive locking. No safepoint check:
  // this is a short, non-blocking critical section.
  ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
  clear_code();
}

Expand Down
8 changes: 4 additions & 4 deletions src/hotspot/share/runtime/deoptimization.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -116,7 +116,7 @@ DeoptimizationScope::~DeoptimizationScope() {
}

void DeoptimizationScope::mark(CompiledMethod* cm, bool inc_recompile_counts) {
ReentrantMutexLocker ml(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);

// If it's already marked but we still need it to be deopted.
if (cm->is_marked_for_deoptimization()) {
Expand All @@ -137,7 +137,7 @@ void DeoptimizationScope::mark(CompiledMethod* cm, bool inc_recompile_counts) {
}

void DeoptimizationScope::dependent(CompiledMethod* cm) {
ReentrantMutexLocker ml(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);

// A method marked by someone else may have a _required_gen lower than what we marked with.
// Therefore only store it if it's higher than _required_gen.
Expand Down Expand Up @@ -168,7 +168,7 @@ void DeoptimizationScope::deoptimize_marked() {
bool wait = false;
while (true) {
{
ReentrantMutexLocker ml(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);

// First we check if we or someone else already deopted the gen we want.
if (DeoptimizationScope::_committed_deopt_gen >= _required_gen) {
Expand Down Expand Up @@ -196,7 +196,7 @@ void DeoptimizationScope::deoptimize_marked() {
Deoptimization::deoptimize_all_marked(); // May safepoint and an additional deopt may have occurred.
DEBUG_ONLY(_deopted = true;)
{
ReentrantMutexLocker ml(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);

// Make sure that committed doesn't go backwards.
// Should only happen if we did a deopt during a safepoint above.
Expand Down
2 changes: 1 addition & 1 deletion src/hotspot/share/runtime/handshake.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -503,7 +503,7 @@ bool HandshakeState::has_operation(bool allow_suspend, bool check_async_exceptio

bool HandshakeState::has_async_exception_operation() {
  // Fast path: no pending operations at all means no async exception either.
  if (!has_operation()) return false;
  // Lock conditionally: the caller may already hold _lock, and VM internal
  // Mutexes do not support recursive locking. No safepoint check needed for
  // this short peek.
  ConditionalMutexLocker ml(&_lock, !_lock.owned_by_self(), Mutex::_no_safepoint_check_flag);
  return _queue.peek(async_exception_filter) != nullptr;
}

Expand Down
20 changes: 7 additions & 13 deletions src/hotspot/share/runtime/mutexLocker.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -187,6 +187,8 @@ void assert_lock_strong(const Mutex* lock);
#define assert_lock_strong(lock)
#endif

// Internal implementation. Skips on null Mutex.
// Subclasses enforce stronger invariants.
class MutexLockerImpl: public StackObj {
protected:
Mutex* _mutex;
Expand Down Expand Up @@ -226,7 +228,8 @@ class MutexLockerImpl: public StackObj {
static void post_initialize();
};

// Simplest locker, does not allow reentrancy
// Simplest mutex locker.
// Does not allow null mutexes.
class MutexLocker: public MutexLockerImpl {
public:
MutexLocker(Mutex* mutex, Mutex::SafepointCheckFlag flag = Mutex::_safepoint_check_flag) :
Expand All @@ -240,7 +243,8 @@ class MutexLocker: public MutexLockerImpl {
}
};

// Conditional locker: only lock when condition is true
// Conditional mutex locker.
// Like MutexLocker above, but only locks when condition is true.
class ConditionalMutexLocker: public MutexLockerImpl {
public:
ConditionalMutexLocker(Mutex* mutex, bool condition, Mutex::SafepointCheckFlag flag = Mutex::_safepoint_check_flag) :
Expand All @@ -254,19 +258,9 @@ class ConditionalMutexLocker: public MutexLockerImpl {
}
};

// Reentrant locker: only lock when the mutex is not owned by this thread
// already. VM internal Mutexes do not support recursive locking, so we
// delegate to ConditionalMutexLocker with an ownership check as the condition.
class ReentrantMutexLocker: public ConditionalMutexLocker {
 public:
  // Lock unless this thread already owns the mutex.
  ReentrantMutexLocker(Mutex* mutex, Mutex::SafepointCheckFlag flag = Mutex::_safepoint_check_flag) :
    ConditionalMutexLocker(mutex, !mutex->owned_by_self(), flag) {}

  // As above, but additionally gated on a caller-supplied condition.
  // Fix: 'condition' was previously accepted but ignored; it is now combined
  // with the reentrancy check so both must hold for the lock to be taken.
  ReentrantMutexLocker(Thread* thread, Mutex* mutex, bool condition, Mutex::SafepointCheckFlag flag = Mutex::_safepoint_check_flag) :
    ConditionalMutexLocker(thread, mutex, condition && !mutex->owned_by_self(), flag) {}
};

// A MonitorLocker is like a MutexLocker above, except it allows
// wait/notify as well which are delegated to the underlying Monitor.

// It also disallows null.
class MonitorLocker: public MutexLocker {
Mutex::SafepointCheckFlag _flag;

Expand Down
10 changes: 6 additions & 4 deletions src/hotspot/share/runtime/task.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -84,9 +84,10 @@ PeriodicTask::~PeriodicTask() {
// enroll the current PeriodicTask
void PeriodicTask::enroll() {
// Follow normal safepoint aware lock enter protocol if the caller does
// not already own the PeriodicTask_lock.
// not already own the PeriodicTask_lock. Otherwise, we don't try to
// enter it again because VM internal Mutexes do not support recursion.
//
ReentrantMutexLocker ml(PeriodicTask_lock);
ConditionalMutexLocker ml(PeriodicTask_lock, !PeriodicTask_lock->owned_by_self());

if (_num_tasks == PeriodicTask::max_tasks) {
fatal("Overflow in PeriodicTask table");
Expand All @@ -104,9 +105,10 @@ void PeriodicTask::enroll() {
// disenroll the current PeriodicTask
void PeriodicTask::disenroll() {
// Follow normal safepoint aware lock enter protocol if the caller does
// not already own the PeriodicTask_lock.
// not already own the PeriodicTask_lock. Otherwise, we don't try to
// enter it again because VM internal Mutexes do not support recursion.
//
ReentrantMutexLocker ml(PeriodicTask_lock);
ConditionalMutexLocker ml(PeriodicTask_lock, !PeriodicTask_lock->owned_by_self());

int index;
for(index = 0; index < _num_tasks && _tasks[index] != this; index++)
Expand Down
2 changes: 1 addition & 1 deletion src/hotspot/share/runtime/threadSMR.inline.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -140,7 +140,7 @@ inline ThreadsList* ThreadsSMRSupport::get_java_thread_list() {
}

inline bool ThreadsSMRSupport::is_a_protected_JavaThread_with_lock(JavaThread *thread) {
  // Take Threads_lock only if this thread does not already own it; VM internal
  // Mutexes do not support recursive locking. Uses the default (safepoint
  // checking) lock protocol, matching the original locker here.
  ConditionalMutexLocker ml(Threads_lock, !Threads_lock->owned_by_self());
  return is_a_protected_JavaThread(thread);
}

Expand Down

0 comments on commit 4b14081

Please sign in to comment.