Skip to content

Commit c90f703

Browse files
committed
HBASE-22594 Clean up for backup examples
1 parent 12d7db7 commit c90f703

File tree

6 files changed

+64
-37
lines changed

6 files changed

+64
-37
lines changed

hbase-checkstyle/src/main/resources/hbase/checkstyle-suppressions.xml

Lines changed: 1 addition & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -40,6 +40,7 @@
4040
<suppress checks="EmptyBlockCheck" files="org.apache.hadoop.hbase.http.TestServletFilter.java"/>
4141
<suppress checks="EmptyBlockCheck" files="org.apache.hadoop.hbase.http.TestGlobalFilter.java"/>
4242
<suppress checks="EmptyBlockCheck" files="org.apache.hadoop.hbase.http.TestPathFilter.java"/>
43+
<suppress checks="EmptyBlockCheck" files="org.apache.hadoop.hbase.backup.example.TestZooKeeperTableArchiveClient.java"/>
4344
<suppress checks="EqualsHashCode" files="org.apache.hadoop.hbase.favored.StartcodeAgnosticServerName.java"/>
4445
<suppress checks="MethodLength" files="org.apache.hadoop.hbase.tool.coprocessor.Branch1CoprocessorMethods.java"/>
4546
<suppress checks="IllegalImport" message="org\.apache\.htrace\.core"/>

hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java

Lines changed: 9 additions & 10 deletions
Original file line number · Diff line number · Diff line change
@@ -21,12 +21,12 @@
2121

2222
import org.apache.hadoop.conf.Configuration;
2323
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
24-
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
25-
import org.apache.yetus.audience.InterfaceAudience;
2624
import org.apache.hadoop.hbase.client.Connection;
2725
import org.apache.hadoop.hbase.util.Bytes;
2826
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
27+
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
2928
import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
29+
import org.apache.yetus.audience.InterfaceAudience;
3030
import org.apache.zookeeper.KeeperException;
3131
import org.slf4j.Logger;
3232
import org.slf4j.LoggerFactory;
@@ -68,14 +68,14 @@ public HFileArchiveManager enableHFileBackup(byte[] table) throws KeeperExceptio
6868

6969
/**
7070
* Stop retaining HFiles for the given table in the archive. HFiles will be cleaned up on the next
71-
* pass of the {@link org.apache.hadoop.hbase.master.cleaner.HFileCleaner}, if the HFiles are retained by another
72-
* cleaner.
71+
* pass of the {@link org.apache.hadoop.hbase.master.cleaner.HFileCleaner}, if the HFiles are
72+
* retained by another cleaner.
7373
* @param table name of the table for which to disable hfile retention.
7474
* @return <tt>this</tt> for chaining.
7575
* @throws KeeperException if we can't reach zookeeper to update the hfile cleaner.
7676
*/
7777
public HFileArchiveManager disableHFileBackup(byte[] table) throws KeeperException {
78-
disable(this.zooKeeper, table);
78+
disable(this.zooKeeper, table);
7979
return this;
8080
}
8181

@@ -95,17 +95,16 @@ public HFileArchiveManager disableHFileBackup() throws IOException {
9595
}
9696

9797
/**
98-
* Perform a best effort enable of hfile retention, which relies on zookeeper communicating the //
99-
* * change back to the hfile cleaner.
98+
* Perform a best effort enable of hfile retention, which relies on zookeeper communicating the
99+
* change back to the hfile cleaner.
100100
* <p>
101101
* No attempt is made to make sure that backups are successfully created - it is inherently an
102102
* <b>asynchronous operation</b>.
103103
* @param zooKeeper watcher connection to zk cluster
104104
* @param table table name on which to enable archiving
105-
* @throws KeeperException
105+
* @throws KeeperException if a ZooKeeper operation fails
106106
*/
107-
private void enable(ZKWatcher zooKeeper, byte[] table)
108-
throws KeeperException {
107+
private void enable(ZKWatcher zooKeeper, byte[] table) throws KeeperException {
109108
LOG.debug("Ensuring archiving znode exists");
110109
ZKUtil.createAndFailSilent(zooKeeper, archiveZnode);
111110

hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java

Lines changed: 14 additions & 8 deletions
Original file line number · Diff line number · Diff line change
@@ -19,14 +19,14 @@
1919

2020
import java.io.IOException;
2121

22-
import org.apache.yetus.audience.InterfaceAudience;
2322
import org.apache.hadoop.conf.Configuration;
2423
import org.apache.hadoop.fs.FileStatus;
2524
import org.apache.hadoop.fs.FileSystem;
2625
import org.apache.hadoop.fs.Path;
2726
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
2827
import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;
2928
import org.apache.hadoop.hbase.util.FSUtils;
29+
import org.apache.yetus.audience.InterfaceAudience;
3030
import org.apache.zookeeper.KeeperException;
3131
import org.slf4j.Logger;
3232
import org.slf4j.LoggerFactory;
@@ -35,7 +35,7 @@
3535
* {@link BaseHFileCleanerDelegate} that only cleans HFiles that don't belong to a table that is
3636
* currently being archived.
3737
* <p>
38-
* This only works properly if the
38+
* This only works properly if the
3939
* {@link org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner}
4040
* is also enabled (it always should be), since it may take a little time
4141
* for the ZK notification to propagate, in which case we may accidentally
@@ -53,14 +53,18 @@ public class LongTermArchivingHFileCleaner extends BaseHFileCleanerDelegate {
5353
public boolean isFileDeletable(FileStatus fStat) {
5454
try {
5555
// if its a directory, then it can be deleted
56-
if (fStat.isDirectory()) return true;
56+
if (fStat.isDirectory()) {
57+
return true;
58+
}
5759

5860
Path file = fStat.getPath();
5961
// check to see if
6062
FileStatus[] deleteStatus = FSUtils.listStatus(this.fs, file, null);
6163
// if the file doesn't exist, then it can be deleted (but should never
6264
// happen since deleted files shouldn't get passed in)
63-
if (deleteStatus == null) return true;
65+
if (deleteStatus == null) {
66+
return true;
67+
}
6468

6569
// otherwise, we need to check the file's table and see its being archived
6670
Path family = file.getParent();
@@ -69,7 +73,8 @@ public boolean isFileDeletable(FileStatus fStat) {
6973

7074
String tableName = table.getName();
7175
boolean ret = !archiveTracker.keepHFiles(tableName);
72-
LOG.debug("Archiver says to [" + (ret ? "delete" : "keep") + "] files for table:" + tableName);
76+
LOG.debug("Archiver says to [" + (ret ? "delete" : "keep") + "] files for table:" +
77+
tableName);
7378
return ret;
7479
} catch (IOException e) {
7580
LOG.error("Failed to lookup status of:" + fStat.getPath() + ", keeping it just incase.", e);
@@ -97,13 +102,14 @@ public void setConf(Configuration config) {
97102

98103
@Override
99104
public void stop(String reason) {
100-
if (this.isStopped()) return;
105+
if (this.isStopped()) {
106+
return;
107+
}
108+
101109
super.stop(reason);
102110
if (this.archiveTracker != null) {
103111
LOG.info("Stopping " + this.archiveTracker);
104112
this.archiveTracker.stop();
105113
}
106-
107114
}
108-
109115
}

hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/TableHFileArchiveTracker.java

Lines changed: 17 additions & 9 deletions
Original file line number · Diff line number · Diff line change
@@ -20,12 +20,12 @@
2020
import java.io.IOException;
2121
import java.util.List;
2222

23-
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
24-
import org.apache.yetus.audience.InterfaceAudience;
2523
import org.apache.hadoop.conf.Configuration;
2624
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
27-
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
2825
import org.apache.hadoop.hbase.zookeeper.ZKListener;
26+
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
27+
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
28+
import org.apache.yetus.audience.InterfaceAudience;
2929
import org.apache.zookeeper.KeeperException;
3030
import org.slf4j.Logger;
3131
import org.slf4j.LoggerFactory;
@@ -38,7 +38,7 @@
3838
* archive.
3939
*/
4040
@InterfaceAudience.Private
41-
public class TableHFileArchiveTracker extends ZKListener {
41+
public final class TableHFileArchiveTracker extends ZKListener {
4242
private static final Logger LOG = LoggerFactory.getLogger(TableHFileArchiveTracker.class);
4343
public static final String HFILE_ARCHIVE_ZNODE_PARENT = "hfilearchive";
4444
private HFileArchiveTableMonitor monitor;
@@ -67,15 +67,16 @@ public void start() throws KeeperException {
6767
@Override
6868
public void nodeCreated(String path) {
6969
// if it is the archive path
70-
if (!path.startsWith(archiveHFileZNode)) return;
70+
if (!path.startsWith(archiveHFileZNode)) {
71+
return;
72+
}
7173

7274
LOG.debug("Archive node: " + path + " created");
7375
// since we are already enabled, just update a single table
7476
String table = path.substring(archiveHFileZNode.length());
7577

7678
// the top level node has come up, so read in all the tables
7779
if (table.length() == 0) {
78-
7980
checkEnabledAndUpdate();
8081
return;
8182
}
@@ -90,7 +91,9 @@ public void nodeCreated(String path) {
9091

9192
@Override
9293
public void nodeChildrenChanged(String path) {
93-
if (!path.startsWith(archiveHFileZNode)) return;
94+
if (!path.startsWith(archiveHFileZNode)) {
95+
return;
96+
}
9497

9598
LOG.debug("Archive node: " + path + " children changed.");
9699
// a table was added to the archive
@@ -134,7 +137,9 @@ private void safeStopTrackingTable(String tableZnode) throws KeeperException {
134137

135138
@Override
136139
public void nodeDeleted(String path) {
137-
if (!path.startsWith(archiveHFileZNode)) return;
140+
if (!path.startsWith(archiveHFileZNode)) {
141+
return;
142+
}
138143

139144
LOG.debug("Archive node: " + path + " deleted");
140145
String table = path.substring(archiveHFileZNode.length());
@@ -260,7 +265,10 @@ public ZKWatcher getZooKeeperWatcher() {
260265
* Stop this tracker and the passed zookeeper
261266
*/
262267
public void stop() {
263-
if (this.stopped) return;
268+
if (this.stopped) {
269+
return;
270+
}
271+
264272
this.stopped = true;
265273
this.watcher.close();
266274
}

hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java

Lines changed: 2 additions & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -18,6 +18,7 @@
1818
package org.apache.hadoop.hbase.backup.example;
1919

2020
import java.io.IOException;
21+
2122
import org.apache.hadoop.conf.Configuration;
2223
import org.apache.hadoop.conf.Configured;
2324
import org.apache.hadoop.hbase.client.Connection;
@@ -109,7 +110,7 @@ public void disableHFileBackup() throws IOException, KeeperException {
109110
* @param table name of the table to check
110111
* @return <tt>true</tt> if it is, <tt>false</tt> otherwise
111112
* @throws IOException if a connection to ZooKeeper cannot be established
112-
* @throws KeeperException
113+
* @throws KeeperException if a ZooKeeper operation fails
113114
*/
114115
public boolean getArchivingEnabled(byte[] table) throws IOException, KeeperException {
115116
HFileArchiveManager manager = createHFileArchiveManager();

hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java

Lines changed: 21 additions & 9 deletions
Original file line number · Diff line number · Diff line change
@@ -284,8 +284,11 @@ public void testMultipleTables() throws Exception {
284284
for (Path file : files) {
285285
String tableName = file.getParent().getParent().getParent().getName();
286286
// check to which table this file belongs
287-
if (tableName.equals(otherTable)) initialCountForOtherTable++;
288-
else if (tableName.equals(STRING_TABLE_NAME)) initialCountForPrimary++;
287+
if (tableName.equals(otherTable)) {
288+
initialCountForOtherTable++;
289+
} else if (tableName.equals(STRING_TABLE_NAME)) {
290+
initialCountForPrimary++;
291+
}
289292
}
290293

291294
assertTrue("Didn't archive files for:" + STRING_TABLE_NAME, initialCountForPrimary > 0);
@@ -308,11 +311,13 @@ public void testMultipleTables() throws Exception {
308311
String tableName = file.getParent().getParent().getParent().getName();
309312
// ensure we don't have files from the non-archived table
310313
assertFalse("Have a file from the non-archived table: " + file, tableName.equals(otherTable));
311-
if (tableName.equals(STRING_TABLE_NAME)) archivedForPrimary++;
314+
if (tableName.equals(STRING_TABLE_NAME)) {
315+
archivedForPrimary++;
316+
}
312317
}
313318

314-
assertEquals("Not all archived files for the primary table were retained.", initialCountForPrimary,
315-
archivedForPrimary);
319+
assertEquals("Not all archived files for the primary table were retained.",
320+
initialCountForPrimary, archivedForPrimary);
316321

317322
// but we still have the archive directory
318323
assertTrue("Archive directory was deleted via archiver", fs.exists(archiveDir));
@@ -389,7 +394,10 @@ public Iterable<FileStatus> answer(InvocationOnMock invocation) throws Throwable
389394

390395
@SuppressWarnings("unchecked")
391396
Iterable<FileStatus> ret = (Iterable<FileStatus>) invocation.callRealMethod();
392-
if (counter[0] >= expected) finished.countDown();
397+
if (counter[0] >= expected) {
398+
finished.countDown();
399+
}
400+
393401
return ret;
394402
}
395403
}).when(delegateSpy).getDeletableFiles(Mockito.anyList());
@@ -414,7 +422,11 @@ private List<Path> getAllFiles(FileSystem fs, Path dir) throws IOException {
414422
for (FileStatus file : files) {
415423
if (file.isDirectory()) {
416424
List<Path> subFiles = getAllFiles(fs, file.getPath());
417-
if (subFiles != null) allFiles.addAll(subFiles);
425+
426+
if (subFiles != null) {
427+
allFiles.addAll(subFiles);
428+
}
429+
418430
continue;
419431
}
420432
allFiles.add(file.getPath());
@@ -441,7 +453,7 @@ private void loadFlushAndCompact(HRegion region, byte[] family) throws IOExcepti
441453
* Create a new hfile in the passed region
442454
* @param region region to operate on
443455
* @param columnFamily family for which to add data
444-
* @throws IOException
456+
* @throws IOException if doing the put or flush fails
445457
*/
446458
private void createHFileInRegion(HRegion region, byte[] columnFamily) throws IOException {
447459
// put one row in the region
@@ -453,7 +465,7 @@ private void createHFileInRegion(HRegion region, byte[] columnFamily) throws IOE
453465
}
454466

455467
/**
456-
* @param cleaner
468+
* @param cleaner the cleaner to use
457469
*/
458470
private void runCleaner(HFileCleaner cleaner, CountDownLatch finished, Stoppable stop)
459471
throws InterruptedException {

0 commit comments

Comments
 (0)