@@ -254,7 +254,7 @@ func TestUpgradeableDirectory(t *testing.T) {
	compareDirectoryEntries(t, upgradedDir, dir, []*ipld.Link{missingLink})
}

-// Test that we fetch as little nodes needed to reach the HAMTShardingSize
+// Test that we fetch as few nodes as needed to reach the HAMTShardingSize
// during the sizeBelowThreshold computation.
// FIXME: This only works for a sequential DAG walk.
// FIXME: Failing in the CI for Ubuntu. This may likely be an indication of race
@@ -269,7 +269,7 @@ func TestHAMTEnumerationWhenComputingSize(t *testing.T) {
	// are the "value" links pointing to anything that is *not* another Shard).
	estimatedLinkSize = mockLinkSizeFunc(1)
	defer func() { estimatedLinkSize = productionLinkSize }()
-	// Use an identity hash function to ease the construction of "full" HAMTs
+	// Use an identity hash function to ease the construction of "complete" HAMTs
	// (see CreateCompleteHAMT below for more details). (Ideally this should be
	// a parameter we pass and not a global option we modify in the caller.)
	oldHashFunc := hamt.HAMTHashFunction
@@ -280,38 +280,40 @@ func TestHAMTEnumerationWhenComputingSize(t *testing.T) {
	//DefaultShardWidth = 8
	// FIXME: We should be able to use a smaller DefaultShardWidth to have
	// a deeper tree and cheaper tests once the import cycle is resolved
-	// in hamt.CreateCompleteHAMT.
+	// in hamt.CreateCompleteHAMT and the DefaultShardWidth value is not
+	// hardcoded there.

	// We create a "complete" HAMT (see CreateCompleteHAMT for more details)
	// with a regular structure to be able to predict how many Shard nodes we
	// will need to fetch in order to reach the HAMTShardingSize threshold in
	// sizeBelowThreshold (assuming a sequential DAG walk function).
	oldHamtOption := HAMTShardingSize
	defer func() { HAMTShardingSize = oldHamtOption }()
+	// (Some arbitrary values below that make this test not that expensive.)
	treeHeight := 2
	thresholdToWidthRatio := 4 // How many leaf shards nodes (with value links,
-	// directory entries) do we need to reach the threshold.
+	// i.e., directory entries) do we need to reach the threshold.
	HAMTShardingSize = DefaultShardWidth * thresholdToWidthRatio
	// With this structure we will then need to fetch the following nodes:
-	// * `thresholdToWidthRatio` leaf Shards with enough value link to reach
+	// * `thresholdToWidthRatio` leaf Shards with enough value links to reach
	// the HAMTShardingSize threshold.
	// * `(treeHeight - 1)` internal nodes to reach those leaf Shard nodes
	// (assuming we have thresholdToWidthRatio below the DefaultShardWidth,
	// i.e., all leaf nodes come from the same parent).
	nodesToFetch := thresholdToWidthRatio + treeHeight - 1
	ds := mdtest.Mock()
-	node, err := hamt.CreateCompleteHAMT(ds, treeHeight)
+	completeHAMTRoot, err := hamt.CreateCompleteHAMT(ds, treeHeight)
	assert.NoError(t, err)

	countGetsDS := newCountGetsDS(ds)
-	hamtDir, err := newHAMTDirectoryFromNode(countGetsDS, node)
+	hamtDir, err := newHAMTDirectoryFromNode(countGetsDS, completeHAMTRoot)
	assert.NoError(t, err)

	countGetsDS.resetCounter()
	// FIXME: Only works with sequential DAG walk (now hardcoded, needs to be
	// added to the internal API) where we can predict the Get requests and
	// tree traversal. It would be desirable to have some test for the concurrent
-	// walk (actually used in production).
+	// walk (which is the one used in production).
	below, err := hamtDir.sizeBelowThreshold(context.TODO(), 0)
	assert.NoError(t, err)
	assert.False(t, below)
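To make the expected numbers concrete (assuming the package's DefaultShardWidth of 256, a value not shown in this hunk): HAMTShardingSize works out to 256 * 4 = 1024, and nodesToFetch to 4 + 2 - 1 = 5, i.e. four leaf Shards plus one internal (root) Shard fetched before the threshold is crossed.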
@@ -491,17 +493,17 @@ func (d *countGetsDS) uniqueCidsFetched() int {
func (d *countGetsDS) Get(ctx context.Context, c cid.Cid) (ipld.Node, error) {
	node, err := d.DAGService.Get(ctx, c)
	if err != nil {
-		return node, err
+		return nil, err
	}

	d.mapLock.Lock()
	d.cidsFetched[c] = struct{}{}
	d.mapLock.Unlock()

-	return node, err
+	return node, nil
}

-// Process sequentially. We don't care about performance here.
+// Process sequentially (blocking) calling Get which tracks requests.
func (d *countGetsDS) GetMany(ctx context.Context, cids []cid.Cid) <-chan *ipld.NodeOption {
	out := make(chan *ipld.NodeOption, len(cids))
	defer close(out)
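The hunk above cuts off inside GetMany. As a rough sketch of the sequential, Get-backed body that the new comment describes (an illustration only, assuming countGetsDS embeds ipld.DAGService as the Get method above suggests; it may not match the committed code exactly):

func (d *countGetsDS) GetMany(ctx context.Context, cids []cid.Cid) <-chan *ipld.NodeOption {
	out := make(chan *ipld.NodeOption, len(cids))
	defer close(out)

	// Fetch each CID in order through Get, which records it in cidsFetched,
	// and emit one NodeOption per request; stop at the first error.
	for _, c := range cids {
		node, err := d.Get(ctx, c)
		if err != nil {
			out <- &ipld.NodeOption{Err: err}
			break
		}
		out <- &ipld.NodeOption{Node: node}
	}
	return out
}

Because out is buffered to len(cids), the sends never block and the deferred close runs once all options have been queued, so callers can simply range over the returned channel.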
0 commit comments