diff --git a/runtime/vm/compiler/backend/il.cc b/runtime/vm/compiler/backend/il.cc index 9fdefe6ff520c..04c0b16dd9649 100644 --- a/runtime/vm/compiler/backend/il.cc +++ b/runtime/vm/compiler/backend/il.cc @@ -824,7 +824,7 @@ void CallTargets::CreateHelper(Zone* zone, const ICData& ic_data) { const auto& cache = MegamorphicCache::Handle( zone, MegamorphicCacheTable::Lookup(thread, name, descriptor)); { - SafepointMutexLocker ml(thread->isolate()->type_feedback_mutex()); + SafepointMutexLocker ml(thread->isolate_group()->type_feedback_mutex()); MegamorphicCacheEntries entries(Array::Handle(zone, cache.buckets())); for (intptr_t i = 0, n = entries.Length(); i < n; i++) { const intptr_t id = diff --git a/runtime/vm/isolate.cc b/runtime/vm/isolate.cc index 3849287955aa6..3f9b43f8b9df4 100644 --- a/runtime/vm/isolate.cc +++ b/runtime/vm/isolate.cc @@ -361,6 +361,12 @@ IsolateGroup::IsolateGroup(std::shared_ptr source, "IsolateGroup::type_arguments_canonicalization_mutex_")), subtype_test_cache_mutex_( NOT_IN_PRODUCT("IsolateGroup::subtype_test_cache_mutex_")), + megamorphic_table_mutex_( + NOT_IN_PRODUCT("IsolateGroup::megamorphic_table_mutex_")), + type_feedback_mutex_( + NOT_IN_PRODUCT("IsolateGroup::type_feedback_mutex_")), + patchable_call_mutex_( + NOT_IN_PRODUCT("IsolateGroup::patchable_call_mutex_")), program_lock_(new SafepointRwLock()), active_mutators_monitor_(new Monitor()), max_active_mutators_(Scavenger::MaxMutatorThreadCount()) { @@ -1648,9 +1654,6 @@ Isolate::Isolate(IsolateGroup* isolate_group, mutex_(NOT_IN_PRODUCT("Isolate::mutex_")), constant_canonicalization_mutex_( NOT_IN_PRODUCT("Isolate::constant_canonicalization_mutex_")), - megamorphic_table_mutex_( - NOT_IN_PRODUCT("Isolate::megamorphic_table_mutex_")), - type_feedback_mutex_(NOT_IN_PRODUCT("Isolate::type_feedback_mutex_")), kernel_data_lib_cache_mutex_( NOT_IN_PRODUCT("Isolate::kernel_data_lib_cache_mutex_")), kernel_data_class_cache_mutex_( diff --git a/runtime/vm/isolate.h 
b/runtime/vm/isolate.h index 77991a9b30332..a98d48b07d9b6 100644 --- a/runtime/vm/isolate.h +++ b/runtime/vm/isolate.h @@ -424,6 +424,9 @@ class IsolateGroup : public IntrusiveDListEntry { return &type_arguments_canonicalization_mutex_; } Mutex* subtype_test_cache_mutex() { return &subtype_test_cache_mutex_; } + Mutex* megamorphic_table_mutex() { return &megamorphic_table_mutex_; } + Mutex* type_feedback_mutex() { return &type_feedback_mutex_; } + Mutex* patchable_call_mutex() { return &patchable_call_mutex_; } #if defined(DART_PRECOMPILED_RUNTIME) Mutex* unlinked_call_map_mutex() { return &unlinked_call_map_mutex_; } @@ -722,6 +725,9 @@ class IsolateGroup : public IntrusiveDListEntry { Mutex type_canonicalization_mutex_; Mutex type_arguments_canonicalization_mutex_; Mutex subtype_test_cache_mutex_; + Mutex megamorphic_table_mutex_; + Mutex type_feedback_mutex_; + Mutex patchable_call_mutex_; #if defined(DART_PRECOMPILED_RUNTIME) Mutex unlinked_call_map_mutex_; @@ -964,9 +970,6 @@ class Isolate : public BaseIsolate, public IntrusiveDListEntry { Mutex* constant_canonicalization_mutex() { return &constant_canonicalization_mutex_; } - Mutex* megamorphic_table_mutex() { return &megamorphic_table_mutex_; } - Mutex* type_feedback_mutex() { return &type_feedback_mutex_; } - Mutex* kernel_data_lib_cache_mutex() { return &kernel_data_lib_cache_mutex_; } Mutex* kernel_data_class_cache_mutex() { return &kernel_data_class_cache_mutex_; @@ -1601,8 +1604,6 @@ class Isolate : public BaseIsolate, public IntrusiveDListEntry { Simulator* simulator_ = nullptr; Mutex mutex_; // Protects compiler stats. Mutex constant_canonicalization_mutex_; // Protects const canonicalization. 
- Mutex megamorphic_table_mutex_; - Mutex type_feedback_mutex_; Mutex kernel_data_lib_cache_mutex_; Mutex kernel_data_class_cache_mutex_; Mutex kernel_constants_mutex_; diff --git a/runtime/vm/megamorphic_cache_table.cc b/runtime/vm/megamorphic_cache_table.cc index 7a1b7f697310e..2e7d1b3978bd4 100644 --- a/runtime/vm/megamorphic_cache_table.cc +++ b/runtime/vm/megamorphic_cache_table.cc @@ -17,7 +17,7 @@ MegamorphicCachePtr MegamorphicCacheTable::Lookup(Thread* thread, const String& name, const Array& descriptor) { Isolate* isolate = thread->isolate(); - SafepointMutexLocker ml(isolate->megamorphic_table_mutex()); + SafepointMutexLocker ml(thread->isolate_group()->megamorphic_table_mutex()); ASSERT(name.IsSymbol()); // TODO(rmacnak): ASSERT(descriptor.IsCanonical()); @@ -46,7 +46,7 @@ MegamorphicCachePtr MegamorphicCacheTable::Lookup(Thread* thread, void MegamorphicCacheTable::PrintSizes(Isolate* isolate) { auto thread = Thread::Current(); - SafepointMutexLocker ml(thread->isolate()->megamorphic_table_mutex()); + SafepointMutexLocker ml(thread->isolate_group()->megamorphic_table_mutex()); StackZone zone(thread); intptr_t size = 0; diff --git a/runtime/vm/object.cc b/runtime/vm/object.cc index 3f6ff74dc740d..56e839d71a9c0 100644 --- a/runtime/vm/object.cc +++ b/runtime/vm/object.cc @@ -15239,7 +15239,7 @@ bool ICData::ValidateInterceptor(const Function& target) const { void ICData::EnsureHasCheck(const GrowableArray& class_ids, const Function& target, intptr_t count) const { - SafepointMutexLocker ml(Isolate::Current()->type_feedback_mutex()); + SafepointMutexLocker ml(IsolateGroup::Current()->type_feedback_mutex()); if (FindCheck(class_ids) != -1) return; AddCheckInternal(class_ids, target, count); @@ -15248,14 +15248,15 @@ void ICData::EnsureHasCheck(const GrowableArray& class_ids, void ICData::AddCheck(const GrowableArray& class_ids, const Function& target, intptr_t count) const { - SafepointMutexLocker ml(Isolate::Current()->type_feedback_mutex()); + 
SafepointMutexLocker ml(IsolateGroup::Current()->type_feedback_mutex()); AddCheckInternal(class_ids, target, count); } void ICData::AddCheckInternal(const GrowableArray& class_ids, const Function& target, intptr_t count) const { - ASSERT(Isolate::Current()->type_feedback_mutex()->IsOwnedByCurrentThread()); + ASSERT( + IsolateGroup::Current()->type_feedback_mutex()->IsOwnedByCurrentThread()); ASSERT(!is_tracking_exactness()); ASSERT(!target.IsNull()); @@ -15343,7 +15344,7 @@ void ICData::EnsureHasReceiverCheck(intptr_t receiver_class_id, const Function& target, intptr_t count, StaticTypeExactnessState exactness) const { - SafepointMutexLocker ml(Isolate::Current()->type_feedback_mutex()); + SafepointMutexLocker ml(IsolateGroup::Current()->type_feedback_mutex()); GrowableArray class_ids(1); class_ids.Add(receiver_class_id); @@ -15356,7 +15357,7 @@ void ICData::AddReceiverCheck(intptr_t receiver_class_id, const Function& target, intptr_t count, StaticTypeExactnessState exactness) const { - SafepointMutexLocker ml(Isolate::Current()->type_feedback_mutex()); + SafepointMutexLocker ml(IsolateGroup::Current()->type_feedback_mutex()); AddReceiverCheckInternal(receiver_class_id, target, count, exactness); } @@ -17174,7 +17175,7 @@ MegamorphicCachePtr MegamorphicCache::New(const String& target_name, void MegamorphicCache::EnsureContains(const Smi& class_id, const Object& target) const { - SafepointMutexLocker ml(Isolate::Current()->type_feedback_mutex()); + SafepointMutexLocker ml(IsolateGroup::Current()->type_feedback_mutex()); if (LookupLocked(class_id) == Object::null()) { InsertLocked(class_id, target); @@ -17195,15 +17196,16 @@ void MegamorphicCache::EnsureContains(const Smi& class_id, } ObjectPtr MegamorphicCache::Lookup(const Smi& class_id) const { - SafepointMutexLocker ml(Isolate::Current()->type_feedback_mutex()); + SafepointMutexLocker ml(IsolateGroup::Current()->type_feedback_mutex()); return LookupLocked(class_id); } ObjectPtr 
MegamorphicCache::LookupLocked(const Smi& class_id) const { auto thread = Thread::Current(); + auto isolate_group = thread->isolate_group(); auto zone = thread->zone(); ASSERT(thread->IsMutatorThread()); - ASSERT(thread->isolate()->type_feedback_mutex()->IsOwnedByCurrentThread()); + ASSERT(isolate_group->type_feedback_mutex()->IsOwnedByCurrentThread()); const auto& backing_array = Array::Handle(zone, buckets()); intptr_t id_mask = mask(); @@ -17225,7 +17227,7 @@ ObjectPtr MegamorphicCache::LookupLocked(const Smi& class_id) const { void MegamorphicCache::InsertLocked(const Smi& class_id, const Object& target) const { auto isolate_group = IsolateGroup::Current(); - ASSERT(Isolate::Current()->type_feedback_mutex()->IsOwnedByCurrentThread()); + ASSERT(isolate_group->type_feedback_mutex()->IsOwnedByCurrentThread()); // As opposed to ICData we are stopping mutator threads from other isolates // while modifying the megamorphic cache, since updates are not atomic. @@ -17242,17 +17244,20 @@ void MegamorphicCache::InsertLocked(const Smi& class_id, } void MegamorphicCache::EnsureCapacityLocked() const { - ASSERT(Isolate::Current()->type_feedback_mutex()->IsOwnedByCurrentThread()); + auto thread = Thread::Current(); + auto zone = thread->zone(); + auto isolate_group = thread->isolate_group(); + ASSERT(isolate_group->type_feedback_mutex()->IsOwnedByCurrentThread()); intptr_t old_capacity = mask() + 1; double load_limit = kLoadFactor * static_cast(old_capacity); if (static_cast(filled_entry_count() + 1) > load_limit) { - const Array& old_buckets = Array::Handle(buckets()); + const Array& old_buckets = Array::Handle(zone, buckets()); intptr_t new_capacity = old_capacity * 2; const Array& new_buckets = - Array::Handle(Array::New(kEntryLength * new_capacity)); + Array::Handle(zone, Array::New(kEntryLength * new_capacity)); - auto& target = Object::Handle(); + auto& target = Object::Handle(zone); for (intptr_t i = 0; i < new_capacity; ++i) { SetEntry(new_buckets, i, 
smi_illegal_cid(), target); } @@ -17261,7 +17266,7 @@ void MegamorphicCache::EnsureCapacityLocked() const { set_filled_entry_count(0); // Rehash the valid entries. - Smi& class_id = Smi::Handle(); + Smi& class_id = Smi::Handle(zone); for (intptr_t i = 0; i < old_capacity; ++i) { class_id ^= GetClassId(old_buckets, i); if (class_id.Value() != kIllegalCid) { @@ -17274,7 +17279,9 @@ void MegamorphicCache::EnsureCapacityLocked() const { void MegamorphicCache::InsertEntryLocked(const Smi& class_id, const Object& target) const { - ASSERT(Isolate::Current()->type_feedback_mutex()->IsOwnedByCurrentThread()); + auto thread = Thread::Current(); + auto isolate_group = thread->isolate_group(); + ASSERT(isolate_group->type_feedback_mutex()->IsOwnedByCurrentThread()); ASSERT(Thread::Current()->IsMutatorThread()); ASSERT(static_cast(filled_entry_count() + 1) <= diff --git a/runtime/vm/runtime_entry.cc b/runtime/vm/runtime_entry.cc index b5f1d00daa7f3..b05b00d98bd31 100644 --- a/runtime/vm/runtime_entry.cc +++ b/runtime/vm/runtime_entry.cc @@ -1361,6 +1361,8 @@ static FunctionPtr InlineCacheMissHandler( return target_function.raw(); } + SafepointMutexLocker ml(thread->isolate_group()->patchable_call_mutex()); + return InlineCacheMissHandlerGivenTargetFunction(args, ic_data, 1, target_function); } @@ -1646,8 +1648,8 @@ void SwitchableCallHandler::DoUnlinkedCall(const UnlinkedCall& unlinked, code = StubCode::MonomorphicSmiableCheck().raw(); } } - CodePatcher::PatchSwitchableCallAtWithMutatorsStopped( - thread_, caller_frame_->pc(), caller_code_, object, code); + CodePatcher::PatchSwitchableCallAt(caller_frame_->pc(), caller_code_, object, + code); // Return the ICData. The miss stub will jump to continue in the IC lookup // stub. 
@@ -1765,8 +1767,8 @@ void SwitchableCallHandler::DoMonomorphicMiss(const Object& data, cache.set_lower_limit(lower); cache.set_upper_limit(upper); const Code& stub = StubCode::SingleTargetCall(); - CodePatcher::PatchSwitchableCallAtWithMutatorsStopped( - thread_, caller_frame_->pc(), caller_code_, cache, stub); + CodePatcher::PatchSwitchableCallAt(caller_frame_->pc(), caller_code_, cache, + stub); // Return the ICData. The miss stub will jump to continue in the IC call // stub. arguments_.SetArgAt(0, StubCode::ICCallThroughCode()); @@ -1776,8 +1778,8 @@ void SwitchableCallHandler::DoMonomorphicMiss(const Object& data, // Patch to call through stub. const Code& stub = StubCode::ICCallThroughCode(); - CodePatcher::PatchSwitchableCallAtWithMutatorsStopped( - thread_, caller_frame_->pc(), caller_code_, ic_data, stub); + CodePatcher::PatchSwitchableCallAt(caller_frame_->pc(), caller_code_, ic_data, + stub); // Return the ICData. The miss stub will jump to continue in the IC lookup // stub. @@ -1794,25 +1796,21 @@ void SwitchableCallHandler::DoMonomorphicMiss(const Object& data, const Code& stub = ic_data.is_tracking_exactness() ? StubCode::OneArgCheckInlineCacheWithExactnessCheck() : StubCode::OneArgCheckInlineCache(); - CodePatcher::PatchInstanceCallAtWithMutatorsStopped( - thread_, caller_frame_->pc(), caller_code_, ic_data, stub); + CodePatcher::PatchInstanceCallAt(caller_frame_->pc(), caller_code_, ic_data, + stub); if (FLAG_trace_ic) { OS::PrintErr("Instance call at %" Px " switching to polymorphic dispatch, %s\n", caller_frame_->pc(), ic_data.ToCString()); } - // ICData can be shared between unoptimized and optimized code, so beware that - // the new receiver class may have already been added through the optimized - // code. - if (!ic_data.HasReceiverClassId(receiver_.GetClassId())) { - GrowableArray args(1); - args.Add(&receiver_); - // Don't count during insertion because the IC stub we continue through will - // do an increment. 
- InlineCacheMissHandlerGivenTargetFunction(args, ic_data, /*count=*/0, - target_function); - } + // Don't count during insertion because the IC stub we continue through will + // do an increment. + GrowableArray args(1); + args.Add(&receiver_); + InlineCacheMissHandlerGivenTargetFunction(args, ic_data, /*count=*/0, + target_function); + arguments_.SetArgAt(0, stub); arguments_.SetReturn(ic_data); #endif // defined(DART_PRECOMPILED_RUNTIME) @@ -1848,8 +1846,8 @@ void SwitchableCallHandler::DoSingleTargetMiss( // Call site is not single target, switch to call using ICData. const Code& stub = StubCode::ICCallThroughCode(); - CodePatcher::PatchSwitchableCallAtWithMutatorsStopped( - thread_, caller_frame_->pc(), caller_code_, ic_data, stub); + CodePatcher::PatchSwitchableCallAt(caller_frame_->pc(), caller_code_, ic_data, + stub); // Return the ICData. The single target stub will jump to continue in the // IC call stub. @@ -1893,25 +1891,21 @@ void SwitchableCallHandler::DoICDataMiss(const ICData& ic_data, const Smi& expected_cid = Smi::Handle(zone_, Smi::New(receiver_.GetClassId())); ASSERT(target_code.HasMonomorphicEntry()); - CodePatcher::PatchSwitchableCallAtWithMutatorsStopped( - thread_, caller_frame_->pc(), caller_code_, expected_cid, target_code); + CodePatcher::PatchSwitchableCallAt(caller_frame_->pc(), caller_code_, + expected_cid, target_code); arguments_.SetArgAt(0, target_code); arguments_.SetReturn(expected_cid); } else { // IC entry might have been added while we waited to get into runtime. - GrowableArray class_ids(1); - class_ids.Add(receiver_.GetClassId()); - if (ic_data.FindCheck(class_ids) == -1) { - ic_data.AddReceiverCheck(receiver_.GetClassId(), target_function); - } + ic_data.EnsureHasReceiverCheck(receiver_.GetClassId(), target_function); if (number_of_checks > FLAG_max_polymorphic_checks) { // Switch to megamorphic call. 
const MegamorphicCache& cache = MegamorphicCache::Handle( zone_, MegamorphicCacheTable::Lookup(thread_, name, descriptor)); const Code& stub = StubCode::MegamorphicCall(); - CodePatcher::PatchSwitchableCallAtWithMutatorsStopped( - thread_, caller_frame_->pc(), caller_code_, cache, stub); + CodePatcher::PatchSwitchableCallAt(caller_frame_->pc(), caller_code_, + cache, stub); arguments_.SetArgAt(0, stub); arguments_.SetReturn(cache); } else { @@ -2087,40 +2081,42 @@ DEFINE_RUNTIME_ENTRY(SwitchableCallMiss, 2) { const Function& caller_function = Function::Handle(zone, caller_frame->LookupDartFunction()); - Object& old_data = Object::Handle(zone); - Code& old_code = Code::Handle(zone); + SwitchableCallHandler handler(thread, receiver, arguments, caller_frame, + caller_code, caller_function); + // Find out actual target (which can be time consuming) without holding any + // locks. + Object& old_data = Object::Handle(zone); #if defined(DART_PRECOMPILED_RUNTIME) - // Grab old_data and do potentially long-running step of resolving the - // target function before we stop mutators. - // This will reduce amount of time spent with all mutators are stopped - // hopefully leaving only code patching to be done then. old_data = CodePatcher::GetSwitchableCallDataAt(caller_frame->pc(), caller_code); #else - old_code ^= CodePatcher::GetInstanceCallAt(caller_frame->pc(), caller_code, - &old_data); + CodePatcher::GetInstanceCallAt(caller_frame->pc(), caller_code, &old_data); #endif - SwitchableCallHandler handler(thread, receiver, arguments, caller_frame, - caller_code, caller_function); const Function& target_function = Function::Handle(zone, handler.ResolveTargetFunction(old_data)); + + { + // We ensure any transitions in patchable calls are done in an atomic + // manner, and we ensure we always transition forward (e.g. Monomorphic -> 
+ // Polymorphic). + // + // Mutators are only stopped if we actually need to patch a patchable call. + // We may not do that if we e.g. just add one more check to an ICData. + SafepointMutexLocker ml(thread->isolate_group()->patchable_call_mutex()); + + auto& old_code = Code::Handle(zone); #if defined(DART_PRECOMPILED_RUNTIME) - old_data = CodePatcher::GetSwitchableCallDataAt(caller_frame->pc(), - caller_code); -#if defined(DEBUG) - old_code ^= CodePatcher::GetSwitchableCallTargetAt(caller_frame->pc(), - caller_code); -#endif + old_data = + CodePatcher::GetSwitchableCallDataAt(caller_frame->pc(), caller_code); + DEBUG_ONLY(old_code = CodePatcher::GetSwitchableCallTargetAt( + caller_frame->pc(), caller_code)); #else - old_code ^= CodePatcher::GetInstanceCallAt(caller_frame->pc(), caller_code, - &old_data); + old_code ^= CodePatcher::GetInstanceCallAt(caller_frame->pc(), caller_code, + &old_data); #endif - handler.HandleMiss(old_data, old_code, target_function); - }, - /*use_force_growth=*/true); + handler.HandleMiss(old_data, old_code, target_function); + } } // Used to find the correct receiver and function to invoke or to fall back to