Skip to content

[stable-25-1] Improve tiny ydb performance (#19909) #19926

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 1 commit into
base: stable-25-1
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
57 changes: 54 additions & 3 deletions ydb/core/driver_lib/run/auto_config_initializer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -244,15 +244,66 @@ namespace NKikimr::NAutoConfigInitializer {
scheduler->SetProgressThreshold(10'000);
}

auto *serviceExecutor = config->AddServiceExecutor();
serviceExecutor->SetServiceName("Interconnect");

if (useSharedThreads && cpuCount >= 1 && cpuCount <= 3) {
config->SetUserExecutor(0);
config->SetSysExecutor(1);
config->SetBatchExecutor(2);
config->SetIoExecutor(3);
serviceExecutor->SetExecutorId(4);

auto *systemExecutor = config->AddExecutor();
auto *userExecutor = config->AddExecutor();
auto *batchExecutor = config->AddExecutor();
auto *ioExecutor = config->AddExecutor();
auto *icExecutor = config->AddExecutor();

ioExecutor->SetType(NKikimrConfig::TActorSystemConfig::TExecutor::IO);
ioExecutor->SetThreads(config->HasForceIOPoolThreads() ? config->GetForceIOPoolThreads() : 1);
ioExecutor->SetName("IO");

auto assignPool = [&](auto *executor, TString name, i16 priority, bool hasSharedThread) {
executor->SetType(NKikimrConfig::TActorSystemConfig::TExecutor::BASIC);
executor->SetThreads(hasSharedThread);
executor->SetMaxThreads(hasSharedThread);
executor->SetName(name);
executor->SetPriority(priority);
executor->SetSpinThreshold(0);
executor->SetHasSharedThread(hasSharedThread);
};

assignPool(systemExecutor, "System", 30, cpuCount >= 3);
assignPool(userExecutor, "User", 20, cpuCount >= 2);
assignPool(batchExecutor, "Batch", 10, false);
assignPool(icExecutor, "IC", 40, true);

batchExecutor->SetForcedForeignSlots(1);
userExecutor->SetForcedForeignSlots(2);
icExecutor->SetForcedForeignSlots(2);
systemExecutor->SetForcedForeignSlots(2);

if (cpuCount >= 2) {
userExecutor->AddAdjacentPools(2);
}
if (cpuCount <= 2) {
icExecutor->AddAdjacentPools(0);
}
if (cpuCount == 1) {
icExecutor->AddAdjacentPools(1);
icExecutor->AddAdjacentPools(2);
}

return;
}

TASPools pools = GetASPools(cpuCount);
ui8 poolCount = pools.GetRealPoolCount();
std::vector<TString> names = pools.GetRealPoolNames();
std::vector<ui8> executorIds = pools.GetIndeces();
std::vector<ui8> priorities = pools.GetPriorities();

auto *serviceExecutor = config->AddServiceExecutor();
serviceExecutor->SetServiceName("Interconnect");

config->SetUserExecutor(pools.SystemPoolId);
config->SetSysExecutor(pools.UserPoolId);
config->SetBatchExecutor(pools.BatchPoolId);
Expand Down
8 changes: 7 additions & 1 deletion ydb/core/driver_lib/run/config_helpers.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -92,8 +92,14 @@ void AddExecutorPool(NActors::TCpuManagerConfig& cpuManager, const NKikimrConfig
if (poolConfig.HasMaxLocalQueueSize()) {
basic.MaxLocalQueueSize = poolConfig.GetMaxLocalQueueSize();
}
for (const auto& pool : poolConfig.GetAdjacentPools()) {
basic.AdjacentPools.push_back(pool);
}
if (poolConfig.HasForcedForeignSlots()) {
basic.ForcedForeignSlotCount = poolConfig.GetForcedForeignSlots();
}
cpuManager.Basic.emplace_back(std::move(basic));

break;
}

Expand Down
11 changes: 11 additions & 0 deletions ydb/core/driver_lib/run/run.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1114,9 +1114,20 @@ void TKikimrRunner::InitializeAppData(const TKikimrRunConfig& runConfig)
const auto& cfg = runConfig.AppConfig;

bool useAutoConfig = !cfg.HasActorSystemConfig() || (cfg.GetActorSystemConfig().HasUseAutoConfig() && cfg.GetActorSystemConfig().GetUseAutoConfig());
bool useSharedThreads = cfg.HasActorSystemConfig() && cfg.GetActorSystemConfig().HasUseSharedThreads() && cfg.GetActorSystemConfig().GetUseSharedThreads();
NAutoConfigInitializer::TASPools pools = NAutoConfigInitializer::GetASPools(cfg.GetActorSystemConfig(), useAutoConfig);
TMap<TString, ui32> servicePools = NAutoConfigInitializer::GetServicePools(cfg.GetActorSystemConfig(), useAutoConfig);

if (useSharedThreads) {
pools.SystemPoolId = 0;
pools.UserPoolId = 1;
pools.BatchPoolId = 2;
pools.IOPoolId = 3;
pools.ICPoolId = 4;
servicePools.clear();
servicePools["Interconnect"] = 4;
}

AppData.Reset(new TAppData(pools.SystemPoolId, pools.UserPoolId, pools.IOPoolId, pools.BatchPoolId,
servicePools,
TypeRegistry.Get(),
Expand Down
4 changes: 4 additions & 0 deletions ydb/core/protos/config.proto
Original file line number Diff line number Diff line change
Expand Up @@ -88,6 +88,10 @@ message TActorSystemConfig {
optional bool HasSharedThread = 18;
optional uint32 MaxLocalQueueSize = 20;
optional uint32 MinLocalQueueSize = 21;

// Tiny YDB
repeated uint32 AdjacentPools = 22;
optional uint32 ForcedForeignSlots = 23;
}

message TScheduler {
Expand Down
4 changes: 4 additions & 0 deletions ydb/library/actors/core/config.h
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,10 @@ namespace NActors {
bool UseRingQueue = false;
ui16 MinLocalQueueSize = 0;
ui16 MaxLocalQueueSize = 0;

// tiny-ydb configs
std::vector<i16> AdjacentPools;
i16 ForcedForeignSlotCount = 0;
};

struct TSharedExecutorPoolConfig {
Expand Down
20 changes: 18 additions & 2 deletions ydb/library/actors/core/cpu_manager.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -56,10 +56,26 @@ namespace NActors {
if (sharedThreadCount) {
sht = 1;
}
poolInfos.push_back(TPoolShortInfo{static_cast<i16>(Config.Basic[poolIds[i]].PoolId), sharedThreadCount, true, Config.Basic[poolIds[i]].PoolName});
poolInfos.push_back(TPoolShortInfo{
.PoolId = static_cast<i16>(Config.Basic[poolIds[i]].PoolId),
.SharedThreadCount = sharedThreadCount,
.ForeignSlots = Config.Basic[poolIds[i]].ForcedForeignSlotCount,
.InPriorityOrder = true,
.PoolName = Config.Basic[poolIds[i]].PoolName,
.ForcedForeignSlots = Config.Basic[poolIds[i]].ForcedForeignSlotCount > 0,
.AdjacentPools = Config.Basic[poolIds[i]].AdjacentPools,
});
}
// Register IO pools with the shared pool. IO pools own no shared threads,
// take no foreign slots, and are excluded from the priority order.
for (ui32 i = 0; i < Config.IO.size(); ++i) {
    poolInfos.push_back(TPoolShortInfo{
        // BUG FIX: index IO pools directly by i. `poolIds` enumerates the
        // Basic pools and must not be used to subscript Config.IO — doing so
        // reads the wrong element (or out of bounds) for any non-trivial
        // pool layout.
        .PoolId = static_cast<i16>(Config.IO[i].PoolId),
        .SharedThreadCount = 0,
        .ForeignSlots = 0,
        .InPriorityOrder = false,
        .PoolName = Config.IO[i].PoolName,
        .ForcedForeignSlots = false,
        .AdjacentPools = {},
    });
}
Shared = std::make_unique<TSharedExecutorPool>(Config.Shared, poolInfos);

Expand Down
67 changes: 57 additions & 10 deletions ydb/library/actors/core/executor_pool_shared.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,48 @@ namespace NActors {
});
}

namespace {
// Returns true when adjacentPoolId may be served by a thread owned by poolId:
// either the two ids coincide, or adjacentPoolId is listed in poolId's
// configured AdjacentPools.
bool CheckPoolAdjacency(const TPoolManager& poolManager, i16 poolId, i16 adjacentPoolId) {
    // Every pool is trivially adjacent to itself.
    if (poolId == adjacentPoolId) {
        return true;
    }
    Y_ABORT_UNLESS((ui32)poolId < poolManager.PoolInfos.size());
    const auto& adjacent = poolManager.PoolInfos[poolId].AdjacentPools;
    return std::any_of(adjacent.begin(), adjacent.end(), [adjacentPoolId](i16 id) {
        return id == adjacentPoolId;
    });
}

// Returns true when poolId has at least one configured adjacent pool.
bool HasAdjacentPools(const TPoolManager& poolManager, i16 poolId) {
    Y_ABORT_UNLESS((ui32)poolId < poolManager.PoolInfos.size());
    const auto& adjacent = poolManager.PoolInfos[poolId].AdjacentPools;
    return adjacent.size() > 0;
}

// Returns the next pool a thread owned by poolId should serve after
// currentPoolId, cycling through poolId's AdjacentPools list and wrapping
// back to poolId itself once the list is exhausted (or currentPoolId is
// not in the list).
i16 NextAdjacentPool(const TPoolManager& poolManager, i16 poolId, i16 currentPoolId) {
    // Validate poolId before the first PoolInfos[poolId] access. The original
    // performed the check only on the poolId != currentPoolId path, after an
    // unchecked index in the equal-id branch.
    Y_ABORT_UNLESS((ui32)poolId < poolManager.PoolInfos.size());
    const auto& adjacentPools = poolManager.PoolInfos[poolId].AdjacentPools;
    if (poolId == currentPoolId) {
        // Starting a new cycle: begin with the first adjacent pool,
        // or stay on the owner pool when none are configured.
        return adjacentPools.empty() ? poolId : adjacentPools[0];
    }
    auto it = std::find(adjacentPools.begin(), adjacentPools.end(), currentPoolId);
    if (it == adjacentPools.end() || it + 1 == adjacentPools.end()) {
        // currentPoolId is unknown or was the last entry: wrap to the owner.
        return poolId;
    }
    return *(it + 1);
}

std::optional<i16> GetForcedForeignSlots(const TPoolManager& poolManager, i16 poolId) {
const auto& poolInfo = poolManager.PoolInfos[poolId];
if (poolInfo.ForcedForeignSlots) {
return poolInfo.ForcedForeignSlots;
}
return std::nullopt;
}

}

LWTRACE_USING(ACTORLIB_PROVIDER);

TSharedExecutorPool::TSharedExecutorPool(
Expand Down Expand Up @@ -80,8 +122,8 @@ namespace NActors {
}
}
for (ui64 i = 0; i < PoolManager.PoolInfos.size(); ++i) {
ForeignThreadsAllowedByPool[i].store(0, std::memory_order_release);
ForeignThreadSlots[i].store(0, std::memory_order_release);
ForeignThreadsAllowedByPool[i].store(PoolManager.PoolInfos[i].ForeignSlots, std::memory_order_release);
ForeignThreadSlots[i].store(PoolManager.PoolInfos[i].ForeignSlots, std::memory_order_release);
LocalThreads[i].store(PoolManager.PoolInfos[i].SharedThreadCount, std::memory_order_release);
LocalNotifications[i].store(0, std::memory_order_release);
}
Expand Down Expand Up @@ -118,7 +160,7 @@ namespace NActors {
continue;
}

if (thread.OwnerPoolId == i) {
if (CheckPoolAdjacency(PoolManager, thread.OwnerPoolId, i)) {
EXECUTOR_POOL_SHARED_DEBUG(EDebugLevel::Executor, "ownerPoolId == poolId; ownerPoolId == ", thread.OwnerPoolId, " poolId == ", i);
return i;
}
Expand Down Expand Up @@ -162,8 +204,10 @@ namespace NActors {
auto &thread = Threads[workerId];
thread.UnsetWork();
TMailbox *mailbox = nullptr;
bool hasAdjacentPools = HasAdjacentPools(PoolManager, thread.OwnerPoolId);
while (!StopFlag.load(std::memory_order_acquire)) {
if (hpnow < thread.SoftDeadlineForPool || thread.CurrentPoolId == thread.OwnerPoolId) {
bool adjacentPool = CheckPoolAdjacency(PoolManager, thread.OwnerPoolId, thread.CurrentPoolId);
if (hpnow < thread.SoftDeadlineForPool || !hasAdjacentPools && adjacentPool) {
EXECUTOR_POOL_SHARED_DEBUG(EDebugLevel::Activation, "continue same pool; ownerPoolId == ", thread.OwnerPoolId, " currentPoolId == ", thread.CurrentPoolId);
if (thread.SoftDeadlineForPool == Max<NHPTimer::STime>()) {
thread.SoftDeadlineForPool = GetCycleCountFast() + thread.SoftProcessingDurationTs;
Expand All @@ -181,6 +225,7 @@ namespace NActors {
EXECUTOR_POOL_SHARED_DEBUG(EDebugLevel::Executor, "no mailbox and need to find new pool; ownerPoolId == ", thread.OwnerPoolId, " currentPoolId == ", thread.CurrentPoolId, " processedActivationsByCurrentPool == ", TlsThreadContext->ProcessedActivationsByCurrentPool);
TlsThreadContext->ProcessedActivationsByCurrentPool = 0;
if (thread.CurrentPoolId != thread.OwnerPoolId) {
thread.AdjacentPoolId = NextAdjacentPool(PoolManager, thread.OwnerPoolId, thread.AdjacentPoolId);
SwitchToPool(thread.OwnerPoolId, hpnow);
continue;
}
Expand All @@ -190,10 +235,11 @@ namespace NActors {
EXECUTOR_POOL_SHARED_DEBUG(EDebugLevel::Activation, "no mailbox and no need to wait; ownerPoolId == ", thread.OwnerPoolId, " currentPoolId == ", thread.CurrentPoolId);
return nullptr;
} else {
EXECUTOR_POOL_SHARED_DEBUG(EDebugLevel::Executor, "comeback to owner pool; ownerPoolId == ", thread.OwnerPoolId, " currentPoolId == ", thread.CurrentPoolId, " processedActivationsByCurrentPool == ", TlsThreadContext->ProcessedActivationsByCurrentPool, " hpnow == ", hpnow, " softDeadlineForPool == ", thread.SoftDeadlineForPool);
EXECUTOR_POOL_SHARED_DEBUG(EDebugLevel::Executor, "change adjacent pool; ownerPoolId == ", thread.OwnerPoolId, " currentPoolId == ", thread.CurrentPoolId, " processedActivationsByCurrentPool == ", TlsThreadContext->ProcessedActivationsByCurrentPool, " hpnow == ", hpnow, " softDeadlineForPool == ", thread.SoftDeadlineForPool);
TlsThreadContext->ProcessedActivationsByCurrentPool = 0;
SwitchToPool(thread.OwnerPoolId, hpnow);
// after soft deadline we check owner pool again
thread.AdjacentPoolId = NextAdjacentPool(PoolManager, thread.OwnerPoolId, thread.AdjacentPoolId);
SwitchToPool(thread.AdjacentPoolId, hpnow);
// after soft deadline we check adjacent pool
continue;
}
bool goToSleep = true;
Expand Down Expand Up @@ -308,7 +354,7 @@ namespace NActors {
Y_ABORT_UNLESS(Threads[i].OwnerPoolId < static_cast<i16>(Pools.size()), "OwnerPoolId is out of range i %" PRIu16 " OwnerPoolId == %" PRIu16, i, Threads[i].OwnerPoolId);
Y_ABORT_UNLESS(Threads[i].OwnerPoolId >= 0, "OwnerPoolId is out of range i %" PRIu16 " OwnerPoolId == %" PRIu16, i, Threads[i].OwnerPoolId);
EXECUTOR_POOL_SHARED_DEBUG(EDebugLevel::ExecutorPool, "create thread ", i, " OwnerPoolId == ", Threads[i].OwnerPoolId);
Threads[i].Thread.reset(
Threads[i].Thread.reset(
new TExecutorThread(
i,
actorSystem,
Expand Down Expand Up @@ -482,8 +528,6 @@ namespace NActors {
return false;
}



void TSharedExecutorPool::FillForeignThreadsAllowed(std::vector<i16>& foreignThreadsAllowed) const {
foreignThreadsAllowed.resize(PoolManager.PoolInfos.size());
for (ui64 i = 0; i < foreignThreadsAllowed.size(); ++i) {
Expand Down Expand Up @@ -513,6 +557,9 @@ namespace NActors {
}

void TSharedExecutorPool::SetForeignThreadSlots(i16 poolId, i16 slots) {
if (auto forcedSlots = GetForcedForeignSlots(PoolManager, poolId)) {
return;
}
i16 current = ForeignThreadsAllowedByPool[poolId].load(std::memory_order_acquire);
if (current == slots) {
return;
Expand Down
5 changes: 4 additions & 1 deletion ydb/library/actors/core/executor_pool_shared.h
Original file line number Diff line number Diff line change
Expand Up @@ -29,8 +29,11 @@ namespace NActors {
// Compact per-pool descriptor handed to the shared executor pool.
struct TPoolShortInfo {
    // Executor pool id this entry describes.
    i16 PoolId = 0;
    // Number of shared threads owned by this pool (0 for IO pools).
    i16 SharedThreadCount = 0;
    // Foreign-slot budget; seeds ForeignThreadsAllowedByPool/ForeignThreadSlots.
    // Populated from the basic pool config's ForcedForeignSlotCount.
    i16 ForeignSlots = 0;
    // True for basic pools, which participate in the shared pool's priority
    // order; IO pools are registered with false.
    bool InPriorityOrder = false;
    TString PoolName;
    // When true, ForeignSlots is a forced value and runtime adjustment via
    // SetForeignThreadSlots is ignored.
    bool ForcedForeignSlots = false;
    // Ids of pools a thread owned by this pool may also serve (tiny-YDB mode).
    std::vector<i16> AdjacentPools;
};

struct TPoolThreadRange {
Expand All @@ -42,7 +45,7 @@ namespace NActors {
TStackVec<TPoolShortInfo, 8> PoolInfos;
TStackVec<TPoolThreadRange, 8> PoolThreadRanges;
TStackVec<i16, 8> PriorityOrder;

TPoolManager(const TVector<TPoolShortInfo> &poolInfos);
};

Expand Down
3 changes: 2 additions & 1 deletion ydb/library/actors/core/executor_thread_ctx.h
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ namespace NActors {
template <typename TDerived, typename TWaitState>
bool Sleep(std::atomic<bool> *stopFlag);
};

struct TExecutorThreadCtx : public TGenericExecutorThreadCtx {
using TBase = TGenericExecutorThreadCtx;

Expand Down Expand Up @@ -104,6 +104,7 @@ namespace NActors {
i16 PoolLeaseIndex = -1;
i16 OwnerPoolId = -1;
i16 CurrentPoolId = -1;
i16 AdjacentPoolId = -1;
NHPTimer::STime SoftDeadlineForPool = 0;
NHPTimer::STime SoftProcessingDurationTs = 0;

Expand Down
Loading
Loading