@@ -177,6 +177,8 @@ namespace NKikimr::NBsController {
 
             THashMap<TEntityId, ui32> NumDisksPerDevice;
 
+            bool Correct = true;
+
             TGroupLayout(const TBlobStorageGroupInfo::TTopology& topology)
                 : Topology(topology)
                 , NumDisksInRealm(Topology.GetTotalFailRealmsNum())
@@ -187,17 +189,19 @@ namespace NKikimr::NBsController {
 
             void UpdateDisk(const TPDiskLayoutPosition& pos, ui32 orderNumber, ui32 value) {
                 NumDisks += value;
-                NumDisksPerRealmGroup[pos.RealmGroup] += value;
+                const ui32 z = NumDisksPerRealmGroup[pos.RealmGroup] += value;
                 const TVDiskIdShort vdisk = Topology.GetVDiskId(orderNumber);
-                NumDisksInRealm[vdisk.FailRealm] += value;
-                NumDisksPerRealm[vdisk.FailRealm][pos.Realm] += value;
-                NumDisksPerRealmTotal[pos.Realm] += value;
+                const ui32 x1 = NumDisksInRealm[vdisk.FailRealm] += value;
+                const ui32 x2 = NumDisksPerRealm[vdisk.FailRealm][pos.Realm] += value;
+                const ui32 x3 = NumDisksPerRealmTotal[pos.Realm] += value;
                 const ui32 domainIdx = Topology.GetFailDomainOrderNumber(vdisk);
-                NumDisksInDomain[domainIdx] += value;
-                NumDisksPerDomain[domainIdx][pos.Domain] += value;
-                NumDisksPerDomainTotal[pos.Domain] += value;
+                const ui32 y1 = NumDisksInDomain[domainIdx] += value;
+                const ui32 y2 = NumDisksPerDomain[domainIdx][pos.Domain] += value;
+                const ui32 y3 = NumDisksPerDomainTotal[pos.Domain] += value;
 
                 NumDisksPerDevice[pos.Device] += value;
+
+                Correct = Correct && x1 == x2 && x2 == x3 && y1 == y2 && y2 == y3 && z == NumDisks;
             }
 
             void AddDisk(const TPDiskLayoutPosition& pos, ui32 orderNumber) {
@@ -233,6 +237,46 @@ namespace NKikimr::NBsController {
                 AddDisk(pos, orderNumber);
                 return score;
             }
+
+            bool IsCorrect() const {
+#ifdef NDEBUG
+                return Correct;
+#endif
+
+                if (NumDisksPerRealmGroup.size() != 1) { // all disks must reside in the same realm group
+                    Y_DEBUG_ABORT_UNLESS(!Correct);
+                    return false;
+                }
+
+                for (size_t i = 0, num = NumDisksInRealm.size(); i < num; ++i) {
+                    for (const auto& [entityId, numDisks] : NumDisksPerRealm[i]) {
+                        Y_DEBUG_ABORT_UNLESS(NumDisksPerRealmTotal.contains(entityId));
+                        if (numDisks != NumDisksInRealm[i] || numDisks != NumDisksPerRealmTotal.at(entityId)) {
+                            // the first case is when group realm contains disks from different real-world realms (DC's)
+                            // -- this is not as bad as it seems, but breaks strict failure model; the second one is a bit
+                            // worse, it means that disks from this real-world realm (DC) are in several realms, which
+                            // may lead to unavailability when DC goes down
+                            Y_DEBUG_ABORT_UNLESS(!Correct);
+                            return false;
+                        }
+                    }
+                }
+
+                // the same code goes for domains
+                for (size_t j = 0, num = NumDisksInDomain.size(); j < num; ++j) {
+                    for (const auto& [entityId, numDisks] : NumDisksPerDomain[j]) {
+                        Y_DEBUG_ABORT_UNLESS(NumDisksPerDomainTotal.contains(entityId));
+                        if (numDisks != NumDisksInDomain[j] || numDisks != NumDisksPerDomainTotal.at(entityId)) {
+                            Y_DEBUG_ABORT_UNLESS(!Correct);
+                            return false;
+                        }
+
+                    }
+                }
+
+                Y_DEBUG_ABORT_UNLESS(Correct);
+                return true;
+            }
         };
 
     } // NLayoutChecker
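
For context, here is a minimal, hypothetical caller-side sketch of how the additions above fit together; it is not part of the commit. `topology`, `positions`, and `totalVDisks` are placeholder names for whatever the surrounding BS_CONTROLLER code supplies, while `TGroupLayout`, `AddDisk`, and `IsCorrect` come from the diff: each `AddDisk` call updates the per-realm and per-domain counters (and, with this change, the incrementally maintained `Correct` flag), and `IsCorrect` is queried once the whole group has been placed.

    // Hypothetical usage sketch (not part of the commit); assumes we are inside
    // NKikimr::NBsController::NLayoutChecker and that `topology`, `positions`, and
    // `totalVDisks` are provided by the caller.
    TGroupLayout layout(topology);                              // topology: TBlobStorageGroupInfo::TTopology
    for (ui32 orderNumber = 0; orderNumber < totalVDisks; ++orderNumber) {
        const TPDiskLayoutPosition& pos = positions[orderNumber];   // RealmGroup/Realm/Domain/Device of the chosen PDisk
        layout.AddDisk(pos, orderNumber);                       // presumably forwards to UpdateDisk(pos, orderNumber, +1)
    }
    if (!layout.IsCorrect()) {
        // NDEBUG builds return the incrementally maintained Correct flag;
        // debug builds redo the full per-realm/per-domain check and assert it agrees with the flag
    }

The apparent design intent is that release builds get a cheap O(1) answer from the `Correct` flag maintained in `UpdateDisk`, while debug builds recompute the full layout check in `IsCorrect` and cross-validate it against that incremental bookkeeping via `Y_DEBUG_ABORT_UNLESS`.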