Skip to content

Commit ff45cbf

Browse files
wchevreuil authored and Apache9 committed
HBASE-26454 CreateTableProcedure still relies on temp dir and renames… (#3845)
Signed-off-by: Duo Zhang <zhangduo@apache.org>
1 parent 6c899ea commit ff45cbf

File tree

5 files changed

+53
-191
lines changed

5 files changed

+53
-191
lines changed

hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java

Lines changed: 5 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,6 @@
2222
import java.io.IOException;
2323
import java.util.ArrayList;
2424
import java.util.List;
25-
import org.apache.hadoop.fs.FileSystem;
2625
import org.apache.hadoop.fs.Path;
2726
import org.apache.hadoop.hbase.DoNotRetryIOException;
2827
import org.apache.hadoop.hbase.HBaseIOException;
@@ -316,41 +315,22 @@ protected static List<RegionInfo> createFsLayout(final MasterProcedureEnv env,
316315
final TableDescriptor tableDescriptor, List<RegionInfo> newRegions,
317316
final CreateHdfsRegions hdfsRegionHandler) throws IOException {
318317
final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
319-
final Path tempdir = mfs.getTempDir();
320318

321319
// 1. Create Table Descriptor
322320
// using a copy of descriptor, table will be created enabling first
323-
final Path tempTableDir = CommonFSUtils.getTableDir(tempdir, tableDescriptor.getTableName());
321+
final Path tableDir = CommonFSUtils.getTableDir(mfs.getRootDir(),
322+
tableDescriptor.getTableName());
324323
((FSTableDescriptors)(env.getMasterServices().getTableDescriptors()))
325-
.createTableDescriptorForTableDirectory(tempTableDir, tableDescriptor, false);
324+
.createTableDescriptorForTableDirectory(
325+
tableDir, tableDescriptor, false);
326326

327327
// 2. Create Regions
328-
newRegions = hdfsRegionHandler.createHdfsRegions(env, tempdir,
328+
newRegions = hdfsRegionHandler.createHdfsRegions(env, mfs.getRootDir(),
329329
tableDescriptor.getTableName(), newRegions);
330330

331-
// 3. Move Table temp directory to the hbase root location
332-
moveTempDirectoryToHBaseRoot(env, tableDescriptor, tempTableDir);
333-
334331
return newRegions;
335332
}
336333

337-
protected static void moveTempDirectoryToHBaseRoot(
338-
final MasterProcedureEnv env,
339-
final TableDescriptor tableDescriptor,
340-
final Path tempTableDir) throws IOException {
341-
final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
342-
final Path tableDir =
343-
CommonFSUtils.getTableDir(mfs.getRootDir(), tableDescriptor.getTableName());
344-
FileSystem fs = mfs.getFileSystem();
345-
if (!fs.delete(tableDir, true) && fs.exists(tableDir)) {
346-
throw new IOException("Couldn't delete " + tableDir);
347-
}
348-
if (!fs.rename(tempTableDir, tableDir)) {
349-
throw new IOException("Unable to move table from temp=" + tempTableDir +
350-
" to hbase root=" + tableDir);
351-
}
352-
}
353-
354334
protected static List<RegionInfo> addTableToMeta(final MasterProcedureEnv env,
355335
final TableDescriptor tableDescriptor, final List<RegionInfo> regions) throws IOException {
356336
assert (regions != null && regions.size() > 0) : "expected at least 1 region, got " + regions;

hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java

Lines changed: 40 additions & 75 deletions
Original file line numberDiff line numberDiff line change
@@ -20,10 +20,7 @@
2020

2121
import java.io.IOException;
2222
import java.util.ArrayList;
23-
import java.util.Arrays;
2423
import java.util.List;
25-
import java.util.stream.Collectors;
26-
import org.apache.hadoop.fs.FileStatus;
2724
import org.apache.hadoop.fs.FileSystem;
2825
import org.apache.hadoop.fs.Path;
2926
import org.apache.hadoop.hbase.MetaTableAccessor;
@@ -52,11 +49,12 @@
5249
import org.slf4j.Logger;
5350
import org.slf4j.LoggerFactory;
5451

52+
import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils;
53+
5554
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
5655
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
5756
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
5857
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DeleteTableState;
59-
import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils;
6058

6159
@InterfaceAudience.Private
6260
public class DeleteTableProcedure
@@ -278,92 +276,59 @@ protected static void deleteFromFs(final MasterProcedureEnv env,
278276
final boolean archive) throws IOException {
279277
final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
280278
final FileSystem fs = mfs.getFileSystem();
281-
final Path tempdir = mfs.getTempDir();
282279

283280
final Path tableDir = CommonFSUtils.getTableDir(mfs.getRootDir(), tableName);
284-
final Path tempTableDir = CommonFSUtils.getTableDir(tempdir, tableName);
285281

286282
if (fs.exists(tableDir)) {
287-
// Ensure temp exists
288-
if (!fs.exists(tempdir) && !fs.mkdirs(tempdir)) {
289-
throw new IOException("HBase temp directory '" + tempdir + "' creation failure.");
290-
}
291-
292-
// Ensure parent exists
293-
if (!fs.exists(tempTableDir.getParent()) && !fs.mkdirs(tempTableDir.getParent())) {
294-
throw new IOException("HBase temp directory '" + tempdir + "' creation failure.");
283+
// Archive regions from FS (temp directory)
284+
if (archive) {
285+
List<Path> regionDirList = new ArrayList<>();
286+
for (RegionInfo region : regions) {
287+
if (RegionReplicaUtil.isDefaultReplica(region)) {
288+
regionDirList.add(FSUtils.getRegionDirFromTableDir(tableDir, region));
289+
List<RegionInfo> mergeRegions = MetaTableAccessor
290+
.getMergeRegions(env.getMasterServices().getConnection(), region.getRegionName());
291+
if (!CollectionUtils.isEmpty(mergeRegions)) {
292+
mergeRegions.stream().forEach(
293+
r -> regionDirList.add(FSUtils.getRegionDirFromTableDir(tableDir, r)));
294+
}
295+
}
296+
}
297+
HFileArchiver
298+
.archiveRegions(env.getMasterConfiguration(), fs, mfs.getRootDir(), tableDir,
299+
regionDirList);
300+
if (!regionDirList.isEmpty()) {
301+
LOG.debug("Archived {} regions", tableName);
302+
}
295303
}
296304

297-
if (fs.exists(tempTableDir)) {
298-
// TODO
299-
// what's in this dir? something old? probably something manual from the user...
300-
// let's get rid of this stuff...
301-
FileStatus[] files = fs.listStatus(tempTableDir);
302-
if (files != null && files.length > 0) {
303-
List<Path> regionDirList = Arrays.stream(files)
304-
.filter(FileStatus::isDirectory)
305-
.map(FileStatus::getPath)
306-
.collect(Collectors.toList());
307-
HFileArchiver.archiveRegions(env.getMasterConfiguration(), fs, mfs.getRootDir(),
308-
tempTableDir, regionDirList);
309-
}
310-
fs.delete(tempTableDir, true);
305+
// Archive mob data
306+
Path mobTableDir =
307+
CommonFSUtils.getTableDir(new Path(mfs.getRootDir(), MobConstants.MOB_DIR_NAME), tableName);
308+
Path regionDir = new Path(mobTableDir, MobUtils.getMobRegionInfo(tableName).getEncodedName());
309+
if (fs.exists(regionDir)) {
310+
HFileArchiver.archiveRegion(fs, mfs.getRootDir(), mobTableDir, regionDir);
311311
}
312312

313-
// Move the table in /hbase/.tmp
314-
if (!fs.rename(tableDir, tempTableDir)) {
315-
throw new IOException("Unable to move '" + tableDir + "' to temp '" + tempTableDir + "'");
313+
// Delete table directory from FS
314+
if (!fs.delete(tableDir, true) && fs.exists(tableDir)) {
315+
throw new IOException("Couldn't delete " + tableDir);
316316
}
317-
}
318317

319-
// Archive regions from FS (temp directory)
320-
if (archive) {
321-
List<Path> regionDirList = new ArrayList<>();
322-
for (RegionInfo region : regions) {
323-
if (RegionReplicaUtil.isDefaultReplica(region)) {
324-
regionDirList.add(FSUtils.getRegionDirFromTableDir(tempTableDir, region));
325-
List<RegionInfo> mergeRegions = MetaTableAccessor
326-
.getMergeRegions(env.getMasterServices().getConnection(), region.getRegionName());
327-
if (!CollectionUtils.isEmpty(mergeRegions)) {
328-
mergeRegions.stream()
329-
.forEach(r -> regionDirList.add(FSUtils.getRegionDirFromTableDir(tempTableDir, r)));
330-
}
318+
// Delete the table directory where the mob files are saved
319+
if (mobTableDir != null && fs.exists(mobTableDir)) {
320+
if (!fs.delete(mobTableDir, true)) {
321+
throw new IOException("Couldn't delete mob dir " + mobTableDir);
331322
}
332323
}
333-
HFileArchiver.archiveRegions(env.getMasterConfiguration(), fs, mfs.getRootDir(), tempTableDir,
334-
regionDirList);
335-
if (!regionDirList.isEmpty()) {
336-
LOG.debug("Archived {} regions", tableName);
337-
}
338-
}
339-
340-
// Archive mob data
341-
Path mobTableDir =
342-
CommonFSUtils.getTableDir(new Path(mfs.getRootDir(), MobConstants.MOB_DIR_NAME), tableName);
343-
Path regionDir =
344-
new Path(mobTableDir, MobUtils.getMobRegionInfo(tableName).getEncodedName());
345-
if (fs.exists(regionDir)) {
346-
HFileArchiver.archiveRegion(fs, mfs.getRootDir(), mobTableDir, regionDir);
347-
}
348-
349-
// Delete table directory from FS (temp directory)
350-
if (!fs.delete(tempTableDir, true) && fs.exists(tempTableDir)) {
351-
throw new IOException("Couldn't delete " + tempTableDir);
352-
}
353324

354-
// Delete the table directory where the mob files are saved
355-
if (mobTableDir != null && fs.exists(mobTableDir)) {
356-
if (!fs.delete(mobTableDir, true)) {
357-
throw new IOException("Couldn't delete mob dir " + mobTableDir);
325+
// Delete the directory on wal filesystem
326+
FileSystem walFs = mfs.getWALFileSystem();
327+
Path tableWALDir = CommonFSUtils.getWALTableDir(env.getMasterConfiguration(), tableName);
328+
if (walFs.exists(tableWALDir) && !walFs.delete(tableWALDir, true)) {
329+
throw new IOException("Couldn't delete table dir on wal filesystem" + tableWALDir);
358330
}
359331
}
360-
361-
// Delete the directory on wal filesystem
362-
FileSystem walFs = mfs.getWALFileSystem();
363-
Path tableWALDir = CommonFSUtils.getWALTableDir(env.getMasterConfiguration(), tableName);
364-
if (walFs.exists(tableWALDir) && !walFs.delete(tableWALDir, true)) {
365-
throw new IOException("Couldn't delete table dir on wal filesystem" + tableWALDir);
366-
}
367332
}
368333

369334
/**

hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -478,8 +478,8 @@ List<Path> getNamespaceRootPaths(String namespace) {
478478
*/
479479
List<Path> getTableRootPaths(TableName tableName, boolean includeSnapshotPath)
480480
throws IOException {
481-
List<Path> paths = Lists.newArrayList(pathHelper.getTmpTableDir(tableName),
482-
pathHelper.getDataTableDir(tableName), pathHelper.getMobTableDir(tableName),
481+
List<Path> paths = Lists.newArrayList(pathHelper.getDataTableDir(tableName),
482+
pathHelper.getMobTableDir(tableName),
483483
pathHelper.getArchiveTableDir(tableName));
484484
if (includeSnapshotPath) {
485485
paths.addAll(getTableSnapshotPaths(tableName));

hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystem.java

Lines changed: 6 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -18,8 +18,7 @@
1818
package org.apache.hadoop.hbase.master;
1919

2020
import static org.junit.Assert.assertEquals;
21-
import static org.junit.Assert.assertTrue;
22-
import static org.junit.Assert.fail;
21+
import static org.junit.Assert.assertFalse;
2322

2423
import java.util.List;
2524
import org.apache.hadoop.fs.FileSystem;
@@ -33,7 +32,6 @@
3332
import org.apache.hadoop.hbase.testclassification.MediumTests;
3433
import org.apache.hadoop.hbase.util.Bytes;
3534
import org.apache.hadoop.hbase.util.CommonFSUtils;
36-
import org.apache.hadoop.hbase.util.HFileArchiveTestingUtil;
3735
import org.junit.AfterClass;
3836
import org.junit.BeforeClass;
3937
import org.junit.ClassRule;
@@ -85,7 +83,7 @@ public void testFsUriSetProperly() throws Exception {
8583
}
8684

8785
@Test
88-
public void testCheckTempDir() throws Exception {
86+
public void testCheckNoTempDir() throws Exception {
8987
final MasterFileSystem masterFileSystem =
9088
UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
9189

@@ -110,28 +108,13 @@ public void testCheckTempDir() throws Exception {
110108
// disable the table so that we can manipulate the files
111109
UTIL.getAdmin().disableTable(tableName);
112110

113-
final Path tableDir = CommonFSUtils.getTableDir(masterFileSystem.getRootDir(), tableName);
114111
final Path tempDir = masterFileSystem.getTempDir();
115-
final Path tempTableDir = CommonFSUtils.getTableDir(tempDir, tableName);
112+
final Path tempNsDir = CommonFSUtils.getNamespaceDir(tempDir,
113+
tableName.getNamespaceAsString());
116114
final FileSystem fs = masterFileSystem.getFileSystem();
117115

118-
// move the table to the temporary directory
119-
if (!fs.rename(tableDir, tempTableDir)) {
120-
fail();
121-
}
122-
123-
masterFileSystem.checkTempDir(tempDir, UTIL.getConfiguration(), fs);
124-
125-
// check if the temporary directory exists and is empty
126-
assertTrue(fs.exists(tempDir));
127-
assertEquals(0, fs.listStatus(tempDir).length);
128-
129-
// check for the existence of the archive directory
130-
for (HRegion region : regions) {
131-
Path archiveDir = HFileArchiveTestingUtil.getRegionArchiveDir(UTIL.getConfiguration(),
132-
region);
133-
assertTrue(fs.exists(archiveDir));
134-
}
116+
// checks the temporary directory does not exist
117+
assertFalse(fs.exists(tempNsDir));
135118

136119
UTIL.deleteTable(tableName);
137120
}

hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java

Lines changed: 0 additions & 66 deletions
Original file line numberDiff line numberDiff line change
@@ -17,34 +17,23 @@
1717
*/
1818
package org.apache.hadoop.hbase.master.procedure;
1919

20-
import static org.junit.Assert.assertEquals;
21-
import static org.junit.Assert.assertFalse;
2220
import static org.junit.Assert.assertNotNull;
2321
import static org.junit.Assert.assertNull;
2422
import static org.junit.Assert.assertTrue;
25-
import static org.junit.Assert.fail;
2623

2724
import java.util.ArrayList;
2825
import java.util.List;
29-
import org.apache.hadoop.fs.FileSystem;
30-
import org.apache.hadoop.fs.FileUtil;
31-
import org.apache.hadoop.fs.Path;
3226
import org.apache.hadoop.hbase.HBaseClassTestRule;
3327
import org.apache.hadoop.hbase.TableName;
3428
import org.apache.hadoop.hbase.TableNotDisabledException;
3529
import org.apache.hadoop.hbase.TableNotFoundException;
3630
import org.apache.hadoop.hbase.client.RegionInfo;
37-
import org.apache.hadoop.hbase.client.Table;
38-
import org.apache.hadoop.hbase.master.MasterFileSystem;
3931
import org.apache.hadoop.hbase.procedure2.Procedure;
4032
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
4133
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
42-
import org.apache.hadoop.hbase.regionserver.HRegion;
4334
import org.apache.hadoop.hbase.testclassification.MasterTests;
4435
import org.apache.hadoop.hbase.testclassification.MediumTests;
4536
import org.apache.hadoop.hbase.util.Bytes;
46-
import org.apache.hadoop.hbase.util.CommonFSUtils;
47-
import org.apache.hadoop.hbase.util.HFileArchiveTestingUtil;
4837
import org.junit.ClassRule;
4938
import org.junit.Rule;
5039
import org.junit.Test;
@@ -186,59 +175,4 @@ public void testRecoveryAndDoubleExecution() throws Exception {
186175

187176
MasterProcedureTestingUtility.validateTableDeletion(getMaster(), tableName);
188177
}
189-
190-
@Test
191-
public void testDeleteWhenTempDirIsNotEmpty() throws Exception {
192-
final TableName tableName = TableName.valueOf(name.getMethodName());
193-
final String FAM = "fam";
194-
final byte[][] splitKeys = new byte[][] {
195-
Bytes.toBytes("b"), Bytes.toBytes("c"), Bytes.toBytes("d")
196-
};
197-
198-
// create the table
199-
MasterProcedureTestingUtility.createTable(
200-
getMasterProcedureExecutor(), tableName, splitKeys, FAM);
201-
202-
// get the current store files for the regions
203-
List<HRegion> regions = UTIL.getHBaseCluster().getRegions(tableName);
204-
// make sure we have 4 regions serving this table
205-
assertEquals(4, regions.size());
206-
207-
// load the table
208-
try (Table table = UTIL.getConnection().getTable(tableName)) {
209-
UTIL.loadTable(table, Bytes.toBytes(FAM));
210-
}
211-
212-
// disable the table so that we can manipulate the files
213-
UTIL.getAdmin().disableTable(tableName);
214-
215-
final MasterFileSystem masterFileSystem =
216-
UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
217-
final Path tableDir = CommonFSUtils.getTableDir(masterFileSystem.getRootDir(), tableName);
218-
final Path tempDir = masterFileSystem.getTempDir();
219-
final Path tempTableDir = CommonFSUtils.getTableDir(tempDir, tableName);
220-
final FileSystem fs = masterFileSystem.getFileSystem();
221-
222-
// copy the table to the temporary directory to make sure the temp directory is not empty
223-
if (!FileUtil.copy(fs, tableDir, fs, tempTableDir, false, UTIL.getConfiguration())) {
224-
fail();
225-
}
226-
227-
// delete the table
228-
final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
229-
long procId = ProcedureTestingUtility.submitAndWait(procExec,
230-
new DeleteTableProcedure(procExec.getEnvironment(), tableName));
231-
ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
232-
MasterProcedureTestingUtility.validateTableDeletion(getMaster(), tableName);
233-
234-
// check if the temporary directory is deleted
235-
assertFalse(fs.exists(tempTableDir));
236-
237-
// check for the existence of the archive directory
238-
for (HRegion region : regions) {
239-
Path archiveDir = HFileArchiveTestingUtil.getRegionArchiveDir(UTIL.getConfiguration(),
240-
region);
241-
assertTrue(fs.exists(archiveDir));
242-
}
243-
}
244178
}

0 commit comments

Comments (0)