healthcheck config #14860

Merged: 4 commits, Mar 7, 2025
2 changes: 2 additions & 0 deletions ydb/core/base/appdata.cpp
@@ -70,6 +70,7 @@ struct TAppData::TImpl {
NKikimrReplication::TReplicationDefaults ReplicationConfig;
NKikimrProto::TDataIntegrityTrailsConfig DataIntegrityTrailsConfig;
NKikimrConfig::TDataErasureConfig DataErasureConfig;
NKikimrConfig::THealthCheckConfig HealthCheckConfig;
};

TAppData::TAppData(
@@ -127,6 +128,7 @@ TAppData::TAppData(
, ReplicationConfig(Impl->ReplicationConfig)
, DataIntegrityTrailsConfig(Impl->DataIntegrityTrailsConfig)
, DataErasureConfig(Impl->DataErasureConfig)
, HealthCheckConfig(Impl->HealthCheckConfig)
, KikimrShouldContinue(kikimrShouldContinue)
, TracingConfigurator(MakeIntrusive<NJaegerTracing::TSamplingThrottlingConfigurator>(TimeProvider, RandomProvider))
{}
2 changes: 2 additions & 0 deletions ydb/core/base/appdata_fwd.h
@@ -73,6 +73,7 @@ namespace NKikimrConfig {
class TMetadataCacheConfig;
class TMemoryControllerConfig;
class TFeatureFlags;
class THealthCheckConfig;
}

namespace NKikimrReplication {
@@ -242,6 +243,7 @@ struct TAppData {
NKikimrReplication::TReplicationDefaults& ReplicationConfig;
NKikimrProto::TDataIntegrityTrailsConfig& DataIntegrityTrailsConfig;
NKikimrConfig::TDataErasureConfig& DataErasureConfig;
NKikimrConfig::THealthCheckConfig& HealthCheckConfig;
bool EnforceUserTokenRequirement = false;
bool EnforceUserTokenCheckRequirement = false; // check token if it was specified
bool AllowHugeKeyValueDeletes = true; // delete when all clients limit deletes per request
1 change: 1 addition & 0 deletions ydb/core/cms/console/configs_dispatcher.cpp
@@ -67,6 +67,7 @@ const THashSet<ui32> DYNAMIC_KINDS({
(ui32)NKikimrConsole::TConfigItem::BlobStorageConfigItem,
(ui32)NKikimrConsole::TConfigItem::MetadataCacheConfigItem,
(ui32)NKikimrConsole::TConfigItem::MemoryControllerConfigItem,
(ui32)NKikimrConsole::TConfigItem::HealthCheckConfigItem,
});

const THashSet<ui32> NON_YAML_KINDS({
4 changes: 4 additions & 0 deletions ydb/core/driver_lib/run/run.cpp
@@ -1200,6 +1200,10 @@ void TKikimrRunner::InitializeAppData(const TKikimrRunConfig& runConfig)
AppData->ReplicationConfig = runConfig.AppConfig.GetReplicationConfig();
}

if (runConfig.AppConfig.HasHealthCheckConfig()) {
AppData->HealthCheckConfig = runConfig.AppConfig.GetHealthCheckConfig();
}

// setup resource profiles
AppData->ResourceProfiles = new TResourceProfiles;
if (runConfig.AppConfig.GetBootstrapConfig().ResourceProfilesSize())
36 changes: 26 additions & 10 deletions ydb/core/health_check/health_check.cpp
@@ -16,6 +16,7 @@
#include <ydb/core/base/path.h>
#include <ydb/core/base/statestorage.h>
#include <ydb/core/base/tablet_pipe.h>
#include <ydb/core/cms/console/configs_dispatcher.h>
#include <ydb/core/mon/mon.h>
#include <ydb/core/base/nameservice.h>
#include <ydb/core/blobstorage/base/blobstorage_events.h>
@@ -28,6 +29,7 @@
#include <ydb/core/util/tuples.h>

#include <ydb/core/protos/blobstorage_distributed_config.pb.h>
#include <ydb/core/protos/config.pb.h>
#include <ydb/core/sys_view/common/events.h>

#include <ydb/public/api/grpc/ydb_monitoring_v1.grpc.pb.h>
@@ -121,11 +123,12 @@ class TSelfCheckRequest : public TActorBootstrapped<TSelfCheckRequest> {
ui64 Cookie;
NWilson::TSpan Span;

TSelfCheckRequest(const TActorId& sender, THolder<TEvSelfCheckRequest> request, ui64 cookie, NWilson::TTraceId&& traceId)
TSelfCheckRequest(const TActorId& sender, THolder<TEvSelfCheckRequest> request, ui64 cookie, NWilson::TTraceId&& traceId, const NKikimrConfig::THealthCheckConfig& config)
: Sender(sender)
, Request(std::move(request))
, Cookie(cookie)
, Span(TComponentTracingLevels::TTablet::Basic, std::move(traceId), "health_check", NWilson::EFlags::AUTO_END)
, HealthCheckConfig(config)
{}

using TGroupId = ui32;
@@ -163,7 +166,7 @@ class TSelfCheckRequest : public TActorBootstrapped<TSelfCheckRequest> {
struct TNodeTabletState {
struct TTabletStateSettings {
TInstant AliveBarrier;
ui32 MaxRestartsPerPeriod = 30; // per hour
ui32 MaxRestartsPerPeriod; // per hour
ui32 MaxTabletIdsStored = 10;
bool ReportGoodTabletsIds = false;
};
@@ -647,6 +650,8 @@ class TSelfCheckRequest : public TActorBootstrapped<TSelfCheckRequest> {
std::optional<TRequestResponse<TEvStateStorage::TEvBoardInfo>> DatabaseBoardInfo;
THashSet<TNodeId> UnknownStaticGroups;

const NKikimrConfig::THealthCheckConfig& HealthCheckConfig;

std::vector<TNodeId> SubscribedNodeIds;
THashSet<TNodeId> StorageNodeIds;
THashSet<TNodeId> ComputeNodeIds;
@@ -1504,6 +1509,7 @@ class TSelfCheckRequest : public TActorBootstrapped<TSelfCheckRequest> {
for (const auto& [hiveId, hiveResponse] : HiveInfo) {
if (hiveResponse.IsOk()) {
settings.AliveBarrier = TInstant::MilliSeconds(hiveResponse->Record.GetResponseTimestamp()) - TDuration::Minutes(5);
settings.MaxRestartsPerPeriod = HealthCheckConfig.GetTabletsRestartsPerPeriodOrangeThreshold();
for (const NKikimrHive::TTabletInfo& hiveTablet : hiveResponse->Record.GetTablets()) {
TSubDomainKey tenantId = TSubDomainKey(hiveTablet.GetObjectDomain());
auto itDomain = FilterDomainKey.find(tenantId);
@@ -1729,9 +1735,9 @@ class TSelfCheckRequest : public TActorBootstrapped<TSelfCheckRequest> {
FillNodeInfo(nodeId, context.Location.mutable_compute()->mutable_node());

TSelfCheckContext rrContext(&context, "NODE_UPTIME");
if (databaseState.NodeRestartsPerPeriod[nodeId] >= 30) {
if (databaseState.NodeRestartsPerPeriod[nodeId] >= HealthCheckConfig.GetNodeRestartsPerPeriodOrangeThreshold()) {
rrContext.ReportStatus(Ydb::Monitoring::StatusFlag::ORANGE, "Node is restarting too often", ETags::Uptime);
} else if (databaseState.NodeRestartsPerPeriod[nodeId] >= 10) {
} else if (databaseState.NodeRestartsPerPeriod[nodeId] >= HealthCheckConfig.GetNodeRestartsPerPeriodYellowThreshold()) {
rrContext.ReportStatus(Ydb::Monitoring::StatusFlag::YELLOW, "The number of node restarts has increased", ETags::Uptime);
} else {
rrContext.ReportStatus(Ydb::Monitoring::StatusFlag::GREEN);
@@ -1769,9 +1775,9 @@ class TSelfCheckRequest : public TActorBootstrapped<TSelfCheckRequest> {
long timeDifferenceUs = nodeSystemState.GetMaxClockSkewWithPeerUs();
TDuration timeDifferenceDuration = TDuration::MicroSeconds(abs(timeDifferenceUs));
Ydb::Monitoring::StatusFlag::Status status;
if (timeDifferenceDuration > MAX_CLOCKSKEW_ORANGE_ISSUE_TIME) {
if (timeDifferenceDuration > TDuration::MicroSeconds(HealthCheckConfig.GetNodesTimeDifferenceUsOrangeThreshold())) {
status = Ydb::Monitoring::StatusFlag::ORANGE;
} else if (timeDifferenceDuration > MAX_CLOCKSKEW_YELLOW_ISSUE_TIME) {
} else if (timeDifferenceDuration > TDuration::MicroSeconds(HealthCheckConfig.GetNodesTimeDifferenceUsYellowThreshold())) {
status = Ydb::Monitoring::StatusFlag::YELLOW;
} else {
status = Ydb::Monitoring::StatusFlag::GREEN;
@@ -2921,9 +2927,6 @@ class TSelfCheckRequest : public TActorBootstrapped<TSelfCheckRequest> {
}
}

const TDuration MAX_CLOCKSKEW_ORANGE_ISSUE_TIME = TDuration::MicroSeconds(25000);
const TDuration MAX_CLOCKSKEW_YELLOW_ISSUE_TIME = TDuration::MicroSeconds(5000);

void FillResult(TOverallStateContext context) {
if (IsSpecificDatabaseFilter()) {
FillDatabaseResult(context, FilterDatabase, DatabaseState[FilterDatabase]);
@@ -3252,12 +3255,16 @@ void TNodeCheckRequest<NMon::TEvHttpInfo>::Bootstrap() {
class THealthCheckService : public TActorBootstrapped<THealthCheckService> {
public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() { return NKikimrServices::TActivity::MONITORING_SERVICE; }
NKikimrConfig::THealthCheckConfig HealthCheckConfig;

THealthCheckService()
{
}

void Bootstrap() {
HealthCheckConfig.CopyFrom(AppData()->HealthCheckConfig);
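// Subscribe to console updates for HealthCheckConfigItem so that threshold
// changes in the dynamic config take effect without restarting the node.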
Send(NConsole::MakeConfigsDispatcherID(SelfId().NodeId()),
new NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionRequest({NKikimrConsole::TConfigItem::HealthCheckConfigItem}));
TMon* mon = AppData()->Mon;
if (mon) {
mon->RegisterActorPage({
@@ -3270,8 +3277,16 @@ class THealthCheckService : public TActorBootstrapped<THealthCheckService> {
Become(&THealthCheckService::StateWork);
}

void Handle(NConsole::TEvConsole::TEvConfigNotificationRequest::TPtr& ev) {
const auto& record = ev->Get()->Record;
if (record.GetConfig().HasHealthCheckConfig()) {
HealthCheckConfig.CopyFrom(record.GetConfig().GetHealthCheckConfig());
}
Send(ev->Sender, new NConsole::TEvConsole::TEvConfigNotificationResponse(record), 0, ev->Cookie);
}

void Handle(TEvSelfCheckRequest::TPtr& ev) {
Register(new TSelfCheckRequest(ev->Sender, ev.Get()->Release(), ev->Cookie, std::move(ev->TraceId)));
Register(new TSelfCheckRequest(ev->Sender, ev.Get()->Release(), ev->Cookie, std::move(ev->TraceId), HealthCheckConfig));
}

std::shared_ptr<NYdbGrpc::TGRpcClientLow> GRpcClientLow;
@@ -3299,6 +3314,7 @@ class THealthCheckService : public TActorBootstrapped<THealthCheckService> {
hFunc(TEvSelfCheckRequest, Handle);
hFunc(TEvNodeCheckRequest, Handle);
hFunc(NMon::TEvHttpInfo, Handle);
hFunc(NConsole::TEvConsole::TEvConfigNotificationRequest, Handle);
cFunc(TEvents::TSystem::PoisonPill, PassAway);
}
}
152 changes: 152 additions & 0 deletions ydb/core/health_check/health_check_ut.cpp
@@ -6,6 +6,7 @@
#include <ydb/core/mind/hive/hive_events.h>
#include <ydb/core/node_whiteboard/node_whiteboard.h>
#include <ydb/core/blobstorage/base/blobstorage_events.h>
#include <ydb/core/protos/config.pb.h>
#include <ydb/core/tx/schemeshard/schemeshard.h>
#include "health_check.cpp"

@@ -1961,5 +1962,156 @@ Y_UNIT_TEST_SUITE(THealthCheckTest) {

UNIT_ASSERT(HasDeadTabletIssue(result));
}

void SendHealthCheckConfigUpdate(TTestActorRuntime &runtime, const TActorId& sender, const NKikimrConfig::THealthCheckConfig &cfg) {
auto *event = new NConsole::TEvConsole::TEvConfigureRequest;

event->Record.AddActions()->MutableRemoveConfigItems()->MutableCookieFilter()->AddCookies("cookie");

auto &item = *event->Record.AddActions()->MutableAddConfigItem()->MutableConfigItem();
item.MutableConfig()->MutableHealthCheckConfig()->CopyFrom(cfg);
item.SetCookie("cookie");

runtime.SendToPipe(MakeConsoleID(), sender, event, 0, GetPipeConfigWithRetries());

TAutoPtr<IEventHandle> handle;
auto record = runtime.GrabEdgeEvent<NConsole::TEvConsole::TEvConfigureResponse>(handle)->Record;
UNIT_ASSERT_VALUES_EQUAL(record.MutableStatus()->GetCode(), Ydb::StatusIds::SUCCESS);
}

void ChangeNodeRestartsPerPeriod(TTestActorRuntime &runtime, const TActorId& sender, const ui32 restartsYellow, const ui32 restartsOrange) {
NKikimrConfig::TAppConfig ext;
auto &cfg = *ext.MutableHealthCheckConfig();
cfg.SetNodeRestartsPerPeriodYellowThreshold(restartsYellow);
cfg.SetNodeRestartsPerPeriodOrangeThreshold(restartsOrange);
SendHealthCheckConfigUpdate(runtime, sender, cfg);
}

void TestConfigUpdateNodeRestartsPerPeriod(TTestActorRuntime &runtime, const TActorId& sender, const ui32 restartsYellow, const ui32 restartsOrange, const ui32 nodeId, Ydb::Monitoring::StatusFlag::Status expectedStatus) {
ChangeNodeRestartsPerPeriod(runtime, sender, restartsYellow, restartsOrange);

TAutoPtr<IEventHandle> handle;
auto *request = new NHealthCheck::TEvSelfCheckRequest;
request->Request.set_return_verbose_status(true);
request->Database = "/Root/database";

runtime.Send(new IEventHandle(NHealthCheck::MakeHealthCheckID(), sender, request, 0));
auto result = runtime.GrabEdgeEvent<NHealthCheck::TEvSelfCheckResult>(handle)->Result;
Ctest << result.ShortDebugString() << Endl;

const auto &database_status = result.database_status(0);
UNIT_ASSERT_VALUES_EQUAL(database_status.name(), "/Root/database");
UNIT_ASSERT_VALUES_EQUAL(database_status.compute().overall(), expectedStatus);
UNIT_ASSERT_VALUES_EQUAL(database_status.compute().nodes()[0].id(), ToString(nodeId));
}

Y_UNIT_TEST(HealthCheckConfigUpdate) {
TPortManager tp;
ui16 port = tp.GetPort(2134);
ui16 grpcPort = tp.GetPort(2135);
auto settings = TServerSettings(port)
.SetNodeCount(1)
.SetDynamicNodeCount(1)
.SetUseRealThreads(false)
.SetDomainName("Root");

TServer server(settings);
server.EnableGRpc(grpcPort);
TClient client(settings);
TTestActorRuntime& runtime = *server.GetRuntime();
TActorId sender = runtime.AllocateEdgeActor();

const ui32 nodeRestarts = 10;
const ui32 nodeId = runtime.GetNodeId(1);
auto observerFunc = [&](TAutoPtr<IEventHandle>& ev) {
switch (ev->GetTypeRewrite()) {
case NConsole::TEvConsole::EvGetTenantStatusResponse: {
auto *x = reinterpret_cast<NConsole::TEvConsole::TEvGetTenantStatusResponse::TPtr*>(&ev);
ChangeGetTenantStatusResponse(x, "/Root/database");
break;
}
case TEvTxProxySchemeCache::EvNavigateKeySetResult: {
auto *x = reinterpret_cast<TEvTxProxySchemeCache::TEvNavigateKeySetResult::TPtr*>(&ev);
TSchemeCacheNavigate::TEntry& entry((*x)->Get()->Request->ResultSet.front());
const TString path = CanonizePath(entry.Path);
if (path == "/Root/database" || entry.TableId.PathId == SUBDOMAIN_KEY) {
entry.Status = TSchemeCacheNavigate::EStatus::Ok;
entry.Kind = TSchemeCacheNavigate::EKind::KindExtSubdomain;
entry.Path = {"Root", "database"};
entry.DomainInfo = MakeIntrusive<TDomainInfo>(SUBDOMAIN_KEY, SUBDOMAIN_KEY);
auto domains = runtime.GetAppData().DomainsInfo;
ui64 hiveId = domains->GetHive();
entry.DomainInfo->Params.SetHive(hiveId);
}
break;
}
case TEvHive::EvResponseHiveNodeStats: {
auto *x = reinterpret_cast<TEvHive::TEvResponseHiveNodeStats::TPtr*>(&ev);
auto &record = (*x)->Get()->Record;
record.ClearNodeStats();
auto *nodeStats = record.MutableNodeStats()->Add();
nodeStats->SetNodeId(nodeId);
nodeStats->SetRestartsPerPeriod(nodeRestarts);
nodeStats->MutableNodeDomain()->SetSchemeShard(SUBDOMAIN_KEY.OwnerId);
nodeStats->MutableNodeDomain()->SetPathId(SUBDOMAIN_KEY.LocalPathId);
break;
}
case TEvSchemeShard::EvDescribeSchemeResult: {
auto *x = reinterpret_cast<NSchemeShard::TEvSchemeShard::TEvDescribeSchemeResult::TPtr*>(&ev);
auto record = (*x)->Get()->MutableRecord();
if (record->path() == "/Root/database") {
record->set_status(NKikimrScheme::StatusSuccess);
// no pools
}
break;
}
case TEvBlobStorage::EvControllerConfigResponse: {
auto *x = reinterpret_cast<TEvBlobStorage::TEvControllerConfigResponse::TPtr*>(&ev);
AddGroupVSlotInControllerConfigResponseWithStaticGroup(x, NKikimrBlobStorage::TGroupStatus::FULL, TVDisks(1));
break;
}
case NSysView::TEvSysView::EvGetVSlotsResponse: {
auto* x = reinterpret_cast<NSysView::TEvSysView::TEvGetVSlotsResponse::TPtr*>(&ev);
AddVSlotsToSysViewResponse(x, 1, TVDisks(1));
break;
}
case NSysView::TEvSysView::EvGetGroupsResponse: {
auto* x = reinterpret_cast<NSysView::TEvSysView::TEvGetGroupsResponse::TPtr*>(&ev);
AddGroupsToSysViewResponse(x);
break;
}
case NSysView::TEvSysView::EvGetStoragePoolsResponse: {
auto* x = reinterpret_cast<NSysView::TEvSysView::TEvGetStoragePoolsResponse::TPtr*>(&ev);
AddStoragePoolsToSysViewResponse(x);
break;
}
case TEvWhiteboard::EvSystemStateResponse: {
auto *x = reinterpret_cast<TEvWhiteboard::TEvSystemStateResponse::TPtr*>(&ev);
ClearLoadAverage(x);
break;
}
case TEvInterconnect::EvNodesInfo: {
auto *x = reinterpret_cast<TEvInterconnect::TEvNodesInfo::TPtr*>(&ev);
auto nodes = MakeIntrusive<TIntrusiveVector<TEvInterconnect::TNodeInfo>>((*x)->Get()->Nodes);
if (!nodes->empty()) {
nodes->erase(nodes->begin() + 1, nodes->end());
nodes->begin()->NodeId = nodeId;
}
auto newEv = IEventHandle::Downcast<TEvInterconnect::TEvNodesInfo>(
new IEventHandle((*x)->Recipient, (*x)->Sender, new TEvInterconnect::TEvNodesInfo(nodes))
);
x->Swap(newEv);
break;
}
}

return TTestActorRuntime::EEventAction::PROCESS;
};
runtime.SetObserverFunc(observerFunc);
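
// With nodeRestarts = 10, each call below pushes new thresholds through the
// console and checks the resulting compute status: restarts below the yellow
// threshold give GREEN, at or above yellow but below orange give YELLOW, and
// at or above orange give ORANGE.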

TestConfigUpdateNodeRestartsPerPeriod(runtime, sender, nodeRestarts + 5, nodeRestarts + 10, nodeId, Ydb::Monitoring::StatusFlag::GREEN);
TestConfigUpdateNodeRestartsPerPeriod(runtime, sender, nodeRestarts / 2, nodeRestarts + 5, nodeId, Ydb::Monitoring::StatusFlag::YELLOW);
TestConfigUpdateNodeRestartsPerPeriod(runtime, sender, nodeRestarts / 5, nodeRestarts / 2, nodeId, Ydb::Monitoring::StatusFlag::ORANGE);
}
}
}
9 changes: 9 additions & 0 deletions ydb/core/protos/config.proto
@@ -1768,6 +1768,14 @@ message THiveConfig {
optional uint64 NodeRestartsForPenalty = 85 [default = 3];
}

message THealthCheckConfig {
optional uint32 NodeRestartsPerPeriodYellowThreshold = 1 [default = 10];
[Review thread]
Member: How do I set the duration of the "period"?
Collaborator: that part is configured through Hive config
Member: This is very unexpected. Do you have an improvement plan?

optional uint32 NodeRestartsPerPeriodOrangeThreshold = 2 [default = 30];
optional uint64 NodesTimeDifferenceUsYellowThreshold = 3 [default = 5000];
optional uint64 NodesTimeDifferenceUsOrangeThreshold = 4 [default = 25000];
optional uint32 TabletsRestartsPerPeriodOrangeThreshold = 5 [default = 30];
}
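For orientation, here is a minimal sketch of how these thresholds could be set in a text-proto config. The field names and defaults come from the message above, and the placement under the app config matches the TAppConfig diff below, but treat the snippet as an illustration rather than a verified cluster config:

    HealthCheckConfig {
      NodeRestartsPerPeriodYellowThreshold: 15
      NodeRestartsPerPeriodOrangeThreshold: 40
      NodesTimeDifferenceUsYellowThreshold: 10000   # 10 ms clock skew -> YELLOW
      NodesTimeDifferenceUsOrangeThreshold: 50000   # 50 ms clock skew -> ORANGE
      TabletsRestartsPerPeriodOrangeThreshold: 60
    }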

message TBlobCacheConfig {
optional uint64 MaxSizeBytes = 1 [default = 1073741824];
}
@@ -2253,6 +2261,7 @@ message TAppConfig {
optional TSelfManagementConfig SelfManagementConfig = 86;
optional NKikimrProto.TDataIntegrityTrailsConfig DataIntegrityTrailsConfig = 87;
optional TDataErasureConfig DataErasureConfig = 88;
optional THealthCheckConfig HealthCheckConfig = 89;

repeated TNamedConfig NamedConfigs = 100;
optional string ClusterYamlConfig = 101;
1 change: 1 addition & 0 deletions ydb/core/protos/console_config.proto
@@ -143,6 +143,7 @@ message TConfigItem {
GroupedMemoryLimiterConfig = 82;
ReplicationConfigItem = 83;
CompPrioritiesConfig = 85;
HealthCheckConfigItem = 89;
[Review thread]
Member: Suggested change: HealthCheckConfigItem = 89; -> HealthCheckConfigItem = 86; ?
Collaborator (Author): I believe the item number should match the configuration number in AppConfig so that the console knows which subscribers to send updates to. If the number is changed, the test fails.
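
For the record, the pairing the author describes, as it lands in this PR (both numbers are taken from the diffs on this page):

    // ydb/core/protos/config.proto, message TAppConfig
    optional THealthCheckConfig HealthCheckConfig = 89;

    // ydb/core/protos/console_config.proto, TConfigItem kind enum
    HealthCheckConfigItem = 89;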


NamedConfigsItem = 100;
ClusterYamlConfigItem = 101;