
Commit f8a1d88

remove ModDataSetSubLockStrategy, add DataNodeLayoutSubLockStrategy
1 parent 226cd5b commit f8a1d88

5 files changed: +33 -40 lines changed

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

Lines changed: 0 additions & 4 deletions
@@ -1744,10 +1744,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final boolean
       DFS_DATANODE_LOCKMANAGER_TRACE_DEFAULT = false;
 
-  public static final String DFS_DATANODE_DATASET_SUBLOCK_COUNT_KEY =
-      "dfs.datanode.dataset.sublock.count";
-  public static final long DFS_DATANODE_DATASET_SUBLOCK_COUNT_DEFAULT = 1000L;
-
   // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry
   @Deprecated
   public static final String DFS_CLIENT_RETRY_POLICY_ENABLED_KEY
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/{ModDataSetSubLockStrategy.java → DataNodeLayoutSubLockStrategy.java}

Lines changed: 3 additions & 23 deletions

@@ -18,36 +18,16 @@
 
 package org.apache.hadoop.hdfs.server.datanode;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
 import java.util.List;
 
-public class ModDataSetSubLockStrategy implements DataSetSubLockStrategy {
-  public static final Logger LOG = LoggerFactory.getLogger(DataSetSubLockStrategy.class);
-
-  private static final String LOCK_NAME_PERFIX = "SubLock";
-  private long modFactor;
-
-  public ModDataSetSubLockStrategy(long mod) {
-    if (mod <= 0) {
-      mod = 1L;
-    }
-    this.modFactor = mod;
-  }
-
+public class DataNodeLayoutSubLockStrategy implements DataSetSubLockStrategy {
   @Override
   public String blockIdToSubLock(long blockid) {
-    return LOCK_NAME_PERFIX + (blockid % modFactor);
+    return DatanodeUtil.idToBlockDirSuffixName(blockid);
   }
 
   @Override
   public List<String> getAllSubLockName() {
-    List<String> res = new ArrayList<>();
-    for (long i = 0L; i < modFactor; i++) {
-      res.add(LOCK_NAME_PERFIX + i);
-    }
-    return res;
+    return DatanodeUtil.getAllSubDirNameForDataSetLock();
   }
 }
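
To make the behavioral change concrete, here is a small comparison sketch. It is illustrative only, not code from the patch: both naming schemes are inlined, "subdir" is taken from the DatanodeUtil javadoc example as a stand-in for DataStorage.BLOCK_SUBDIR_PREFIX, '/' stands in for SEP, and 1000 is the old default of dfs.datanode.dataset.sublock.count.

// Hedged sketch: compares the removed mod-based sublock naming with the new
// layout-based naming for the same block ID. All constants are inlined assumptions.
public class SubLockNamingSketch {
  public static void main(String[] args) {
    long blockId = 1073741825L;                  // an arbitrary block ID

    // Removed ModDataSetSubLockStrategy: "SubLock" + (blockId % modFactor).
    long modFactor = 1000L;
    String modName = "SubLock" + (blockId % modFactor);

    // New DataNodeLayoutSubLockStrategy: the sublock name mirrors the
    // finalized-block directory suffix, "subdir<d1>/subdir<d2>".
    int d1 = (int) ((blockId >> 16) & 0x1F);
    int d2 = (int) ((blockId >> 8) & 0x1F);
    String layoutName = "subdir" + d1 + "/" + "subdir" + d2;

    System.out.println(modName);     // SubLock825
    System.out.println(layoutName);  // subdir0/subdir0
  }
}

With the new strategy the set of possible sublock names is fixed at 32 × 32 = 1024 (exactly what getAllSubLockName now enumerates), and sublock boundaries line up with the on-disk block directories instead of an arbitrary modulus.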

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java

Lines changed: 28 additions & 0 deletions
@@ -21,6 +21,8 @@
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -115,6 +117,8 @@ public static boolean dirNoFilesRecursive(
   /**
    * Get the directory where a finalized block with this ID should be stored.
    * Do not attempt to create the directory.
+   * Note: update {@link DatanodeUtil#idToBlockDirSuffixName(long)} and
+   * {@link DatanodeUtil#getAllSubDirNameForDataSetLock()} when this method is changed.
    * @param root the root directory where finalized blocks are stored
    * @param blockId
    * @return
@@ -127,6 +131,30 @@ public static File idToBlockDir(File root, long blockId) {
     return new File(root, path);
   }
 
+  /**
+   * Take an example: we have a block with blockid mapping to:
+   * "/data1/hadoop/hdfs/datanode/current/BP-xxxx/current/finalized/subdir0/subdir0"
+   * We return "subdir0/subdir0"
+   * @return
+   */
+  public static String idToBlockDirSuffixName(long blockId) {
+    int d1 = (int) ((blockId >> 16) & 0x1F);
+    int d2 = (int) ((blockId >> 8) & 0x1F);
+    return DataStorage.BLOCK_SUBDIR_PREFIX + d1 + SEP +
+        DataStorage.BLOCK_SUBDIR_PREFIX + d2;
+  }
+
+  public static List<String> getAllSubDirNameForDataSetLock() {
+    List<String> res = new ArrayList<>();
+    for (int d1 = 0; d1 <= 0x1F; d1++) {
+      for (int d2 = 0; d2 <= 0x1F; d2++) {
+        res.add(DataStorage.BLOCK_SUBDIR_PREFIX + d1 + SEP +
+            DataStorage.BLOCK_SUBDIR_PREFIX + d2);
+      }
+    }
+    return res;
+  }
+
   /**
    * @return the FileInputStream for the meta data of the given block.
    * @throws FileNotFoundException
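
As a worked example of the bit arithmetic above (illustrative, not part of the patch): for blockId = 0x12345, (blockId >> 16) & 0x1F = 1 and (blockId >> 8) & 0x1F = 0x123 & 0x1F = 3, so idToBlockDirSuffixName returns "subdir1/subdir3". Since each level takes one of 32 values, getAllSubDirNameForDataSetLock enumerates 32 × 32 = 1024 suffixes, one per possible block directory. A standalone self-check sketch, assuming "subdir" for DataStorage.BLOCK_SUBDIR_PREFIX and '/' for SEP:

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;

// Hedged re-implementation of the two new helpers for a quick sanity check;
// "subdir" and "/" are assumed stand-ins for BLOCK_SUBDIR_PREFIX and SEP.
public class BlockDirSuffixCheck {
  static String idToBlockDirSuffixName(long blockId) {
    int d1 = (int) ((blockId >> 16) & 0x1F);
    int d2 = (int) ((blockId >> 8) & 0x1F);
    return "subdir" + d1 + "/" + "subdir" + d2;
  }

  static List<String> getAllSubDirNameForDataSetLock() {
    List<String> res = new ArrayList<>();
    for (int d1 = 0; d1 <= 0x1F; d1++) {
      for (int d2 = 0; d2 <= 0x1F; d2++) {
        res.add("subdir" + d1 + "/" + "subdir" + d2);
      }
    }
    return res;
  }

  public static void main(String[] args) {
    System.out.println(idToBlockDirSuffixName(0x12345L));  // subdir1/subdir3

    HashSet<String> all = new HashSet<>(getAllSubDirNameForDataSetLock());
    System.out.println(all.size());                        // 1024
    // Every block ID maps to a suffix in the enumerated set.
    for (long id = 0; id < 1_000_000; id++) {
      if (!all.contains(idToBlockDirSuffixName(id))) {
        throw new AssertionError("missing suffix for block " + id);
      }
    }
  }
}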

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

Lines changed: 2 additions & 5 deletions
@@ -64,12 +64,12 @@
 import org.apache.hadoop.hdfs.server.common.DataNodeLockManager;
 import org.apache.hadoop.hdfs.server.common.DataNodeLockManager.LockLevel;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutSubLockStrategy;
 import org.apache.hadoop.hdfs.server.datanode.DataSetLockManager;
 import org.apache.hadoop.hdfs.server.datanode.DataSetSubLockStrategy;
 import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
 import org.apache.hadoop.hdfs.server.datanode.LocalReplica;
-import org.apache.hadoop.hdfs.server.datanode.ModDataSetSubLockStrategy;
 import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
 import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -292,7 +292,6 @@ public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
   private volatile long lastDirScannerFinishTime;
 
   private final DataSetSubLockStrategy datasetSubLockStrategy;
-  private final long datasetSubLockCount;
 
   /**
    * An FSDataset has a directory where it loads its data files.
@@ -398,9 +397,7 @@ public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
         DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_MAX_NOTIFY_COUNT_KEY,
         DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_MAX_NOTIFY_COUNT_DEFAULT);
     lastDirScannerNotifyTime = System.currentTimeMillis();
-    datasetSubLockCount = conf.getLong(DFSConfigKeys.DFS_DATANODE_DATASET_SUBLOCK_COUNT_KEY,
-        DFSConfigKeys.DFS_DATANODE_DATASET_SUBLOCK_COUNT_DEFAULT);
-    this.datasetSubLockStrategy = new ModDataSetSubLockStrategy(datasetSubLockCount);
+    this.datasetSubLockStrategy = new DataNodeLayoutSubLockStrategy();
   }
 
   /**
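
With the configuration knob gone, FsDatasetImpl constructs the strategy with no arguments and callers depend only on the DataSetSubLockStrategy interface. The sketch below is a hedged illustration, not FsDatasetImpl code: the real lock acquisition goes through DataSetLockManager and LockLevel (not reproduced here), so a plain name-keyed lock map stands in for it.

import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Supplier;

// The interface, with the two methods that appear in this commit's diffs.
interface DataSetSubLockStrategy {
  String blockIdToSubLock(long blockid);
  List<String> getAllSubLockName();
}

// Illustrative caller: pre-creates one lock per sublock name, then resolves the
// lock to take from the block ID. Blocks that live in the same finalized subdir
// share a sublock name and therefore contend on the same lock.
class SubLockUserSketch {
  private final DataSetSubLockStrategy strategy;
  private final ConcurrentHashMap<String, ReentrantReadWriteLock> locks =
      new ConcurrentHashMap<>();

  SubLockUserSketch(DataSetSubLockStrategy strategy) {
    this.strategy = strategy;
    for (String name : strategy.getAllSubLockName()) {
      locks.put(name, new ReentrantReadWriteLock());
    }
  }

  <T> T withReadLock(long blockId, Supplier<T> body) {
    ReentrantReadWriteLock lock = locks.get(strategy.blockIdToSubLock(blockId));
    lock.readLock().lock();
    try {
      return body.get();
    } finally {
      lock.readLock().unlock();
    }
  }
}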

hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml

Lines changed: 0 additions & 8 deletions
@@ -6569,14 +6569,6 @@
   </description>
 </property>
 
-<property>
-  <name>dfs.datanode.dataset.sublock.count</name>
-  <value>1000</value>
-  <description>
-    The dataset readwrite lock counts for a volume.
-  </description>
-</property>
-
 <property>
   <name>dfs.client.fsck.connect.timeout</name>
   <value>60000ms</value>
