
Commit 2576470

HBASE-7003 Moved backup examples into hbase-examples
1 parent 1c1638f commit 2576470

35 files changed: +123 −166 lines

hbase-checkstyle/src/main/resources/hbase/checkstyle-suppressions.xml

Lines changed: 1 addition & 0 deletions
@@ -40,6 +40,7 @@
   <suppress checks="EmptyBlockCheck" files="TestServletFilter.java"/>
   <suppress checks="EmptyBlockCheck" files="TestGlobalFilter.java"/>
   <suppress checks="EmptyBlockCheck" files="TestPathFilter.java"/>
+  <suppress checks="EmptyBlockCheck" files="TestZooKeeperTableArchiveClient.java"/>
   <suppress checks="EqualsHashCode" files="StartcodeAgnosticServerName.java"/>
   <suppress checks="MethodLength" files="Branch1CoprocessorMethods.java"/>
   <suppress checks="IllegalImport" message="org\.apache\.htrace\.core"/>
Lines changed: 10 additions & 11 deletions
@@ -15,18 +15,18 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hbase.backup.example;
+package org.apache.hadoop.hbase.backup;

 import java.io.IOException;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -68,14 +68,14 @@ public HFileArchiveManager enableHFileBackup(byte[] table) throws KeeperException

   /**
    * Stop retaining HFiles for the given table in the archive. HFiles will be cleaned up on the next
-   * pass of the {@link org.apache.hadoop.hbase.master.cleaner.HFileCleaner}, if the HFiles are retained by another
-   * cleaner.
+   * pass of the {@link org.apache.hadoop.hbase.master.cleaner.HFileCleaner}, if the HFiles are
+   * retained by another cleaner.
    * @param table name of the table for which to disable hfile retention.
    * @return <tt>this</tt> for chaining.
    * @throws KeeperException if if we can't reach zookeeper to update the hfile cleaner.
    */
   public HFileArchiveManager disableHFileBackup(byte[] table) throws KeeperException {
-    disable(this.zooKeeper, table);
+    disable(this.zooKeeper, table);
     return this;
   }

@@ -95,17 +95,16 @@ public HFileArchiveManager disableHFileBackup() throws IOException {
   }

   /**
-   * Perform a best effort enable of hfile retention, which relies on zookeeper communicating the //
-   * * change back to the hfile cleaner.
+   * Perform a best effort enable of hfile retention, which relies on zookeeper communicating the
+   * change back to the hfile cleaner.
    * <p>
    * No attempt is made to make sure that backups are successfully created - it is inherently an
    * <b>asynchronous operation</b>.
    * @param zooKeeper watcher connection to zk cluster
    * @param table table name on which to enable archiving
-   * @throws KeeperException
+   * @throws KeeperException if a ZooKeeper operation fails
    */
-  private void enable(ZKWatcher zooKeeper, byte[] table)
-      throws KeeperException {
+  private void enable(ZKWatcher zooKeeper, byte[] table) throws KeeperException {
     LOG.debug("Ensuring archiving znode exists");
     ZKUtil.createAndFailSilent(zooKeeper, archiveZnode);

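As context for the javadoc touched above: enableHFileBackup returns the manager (visible in the hunk header) and disableHFileBackup returns this, so the two calls chain, and enabling is explicitly best-effort and asynchronous. A minimal sketch, assuming an already-constructed HFileArchiveManager in the same package (its construction is not shown in this diff):

    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.zookeeper.KeeperException;

    // Sketch only: `manager` is assumed to be provided by surrounding code
    // that is not part of this diff.
    final class ArchiveToggleSketch {
      static void toggle(HFileArchiveManager manager, String tableName) throws KeeperException {
        byte[] table = Bytes.toBytes(tableName);
        // enabling is best-effort and asynchronous per the javadoc above;
        // both methods return the manager, so the calls chain
        manager.enableHFileBackup(table).disableHFileBackup(table);
      }
    }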
Lines changed: 1 addition & 1 deletion
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hbase.backup.example;
+package org.apache.hadoop.hbase.backup;

 import java.util.List;
 import java.util.Set;
Lines changed: 16 additions & 10 deletions
@@ -15,18 +15,18 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hbase.backup.example;
+package org.apache.hadoop.hbase.backup;

 import java.io.IOException;

-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -35,7 +35,7 @@
  * {@link BaseHFileCleanerDelegate} that only cleans HFiles that don't belong to a table that is
  * currently being archived.
  * <p>
- * This only works properly if the
+ * This only works properly if the
  * {@link org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner}
  * is also enabled (it always should be), since it may take a little time
  * for the ZK notification to propagate, in which case we may accidentally
@@ -53,14 +53,18 @@ public class LongTermArchivingHFileCleaner extends BaseHFileCleanerDelegate {
   public boolean isFileDeletable(FileStatus fStat) {
     try {
       // if its a directory, then it can be deleted
-      if (fStat.isDirectory()) return true;
-
+      if (fStat.isDirectory()) {
+        return true;
+      }
+
       Path file = fStat.getPath();
       // check to see if
       FileStatus[] deleteStatus = FSUtils.listStatus(this.fs, file, null);
       // if the file doesn't exist, then it can be deleted (but should never
       // happen since deleted files shouldn't get passed in)
-      if (deleteStatus == null) return true;
+      if (deleteStatus == null) {
+        return true;
+      }

       // otherwise, we need to check the file's table and see its being archived
       Path family = file.getParent();
@@ -69,7 +73,8 @@ public boolean isFileDeletable(FileStatus fStat) {

       String tableName = table.getName();
       boolean ret = !archiveTracker.keepHFiles(tableName);
-      LOG.debug("Archiver says to [" + (ret ? "delete" : "keep") + "] files for table:" + tableName);
+      LOG.debug("Archiver says to [" + (ret ? "delete" : "keep") + "] files for table:" +
+        tableName);
       return ret;
     } catch (IOException e) {
       LOG.error("Failed to lookup status of:" + fStat.getPath() + ", keeping it just incase.", e);
@@ -97,13 +102,14 @@ public void setConf(Configuration config) {

   @Override
   public void stop(String reason) {
-    if (this.isStopped()) return;
+    if (this.isStopped()) {
+      return;
+    }
+
     super.stop(reason);
     if (this.archiveTracker != null) {
       LOG.info("Stopping " + this.archiveTracker);
       this.archiveTracker.stop();
     }
-
   }
-
 }
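The class javadoc above explains that this delegate keeps HFiles for tables currently being archived and only behaves correctly when TimeToLiveHFileCleaner is also active. A hedged configuration sketch follows: hbase.master.hfilecleaner.plugins is the standard HBase property for installing HFileCleaner delegates, and the archiving cleaner's fully qualified name is inferred from the package and class names shown in this diff, so verify both against your build:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch: register both cleaner delegates on the master. The property name
    // is the standard HFileCleaner plugin key; the archiving cleaner's package
    // reflects its new location in this commit and is an assumption to verify.
    public final class CleanerConfigSketch {
      public static Configuration configure() {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.master.hfilecleaner.plugins",
            "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner,"
                + "org.apache.hadoop.hbase.backup.LongTermArchivingHFileCleaner");
        return conf;
      }
    }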
Lines changed: 18 additions & 10 deletions
@@ -15,17 +15,17 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hbase.backup.example;
+package org.apache.hadoop.hbase.backup;

 import java.io.IOException;
 import java.util.List;

-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKListener;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -38,7 +38,7 @@
  * archive.
  */
 @InterfaceAudience.Private
-public class TableHFileArchiveTracker extends ZKListener {
+public final class TableHFileArchiveTracker extends ZKListener {
   private static final Logger LOG = LoggerFactory.getLogger(TableHFileArchiveTracker.class);
   public static final String HFILE_ARCHIVE_ZNODE_PARENT = "hfilearchive";
   private HFileArchiveTableMonitor monitor;
@@ -67,15 +67,16 @@ public void start() throws KeeperException {
   @Override
   public void nodeCreated(String path) {
     // if it is the archive path
-    if (!path.startsWith(archiveHFileZNode)) return;
+    if (!path.startsWith(archiveHFileZNode)) {
+      return;
+    }

     LOG.debug("Archive node: " + path + " created");
     // since we are already enabled, just update a single table
     String table = path.substring(archiveHFileZNode.length());

     // the top level node has come up, so read in all the tables
     if (table.length() == 0) {
-
       checkEnabledAndUpdate();
       return;
     }
@@ -90,7 +91,9 @@ public void nodeCreated(String path) {

   @Override
   public void nodeChildrenChanged(String path) {
-    if (!path.startsWith(archiveHFileZNode)) return;
+    if (!path.startsWith(archiveHFileZNode)) {
+      return;
+    }

     LOG.debug("Archive node: " + path + " children changed.");
     // a table was added to the archive
@@ -134,7 +137,9 @@ private void safeStopTrackingTable(String tableZnode) throws KeeperException {

   @Override
   public void nodeDeleted(String path) {
-    if (!path.startsWith(archiveHFileZNode)) return;
+    if (!path.startsWith(archiveHFileZNode)) {
+      return;
+    }

     LOG.debug("Archive node: " + path + " deleted");
     String table = path.substring(archiveHFileZNode.length());
@@ -260,7 +265,10 @@ public ZKWatcher getZooKeeperWatcher() {
    * Stop this tracker and the passed zookeeper
    */
   public void stop() {
-    if (this.stopped) return;
+    if (this.stopped) {
+      return;
+    }
+
     this.stopped = true;
     this.watcher.close();
   }
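All three callbacks changed above follow the same pattern: return early unless the znode path starts with the archive znode, then take the remainder of the path, an empty remainder meaning the archive parent node itself changed. A small standalone illustration of that guard (outside the real tracker, with archiveZNode standing in for its archiveHFileZNode field):

    // Illustration only: mirrors the prefix check and substring used by the
    // nodeCreated/nodeChildrenChanged/nodeDeleted callbacks above.
    final class ArchivePathGuard {
      /** Returns the path remainder under the archive znode, or null for unrelated paths. */
      static String remainderUnderArchive(String archiveZNode, String path) {
        if (!path.startsWith(archiveZNode)) {
          return null; // the callbacks return early for znodes outside the archive subtree
        }
        // an empty remainder means the archive parent node itself changed,
        // in which case the tracker re-reads every table
        return path.substring(archiveZNode.length());
      }
    }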
Lines changed: 4 additions & 4 deletions
@@ -15,17 +15,17 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hbase.backup.example;
+package org.apache.hadoop.hbase.backup;

 import java.io.IOException;

-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;

 /**
@@ -110,7 +110,7 @@ public void disableHFileBackup() throws IOException, KeeperException {
    * @param table name of the table to check
    * @return <tt>true</tt> if it is, <tt>false</tt> otherwise
    * @throws IOException if a connection to ZooKeeper cannot be established
-   * @throws KeeperException
+   * @throws KeeperException if a ZooKeeper operation fails
    */
   public boolean getArchivingEnabled(byte[] table) throws IOException, KeeperException {
     HFileArchiveManager manager = createHFileArchiveManager();
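The javadoc fix above documents getArchivingEnabled(byte[]). The enclosing client class is not named in this excerpt, so the usage sketch below stands a hypothetical interface in for it; only the method signature shown in the diff is assumed:

    import java.io.IOException;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.zookeeper.KeeperException;

    // Hypothetical stand-in for the (unnamed) archiving client class.
    interface ArchivingStatusClient {
      boolean getArchivingEnabled(byte[] table) throws IOException, KeeperException;
    }

    final class ArchiveStatusCheck {
      static void report(ArchivingStatusClient client, String tableName)
          throws IOException, KeeperException {
        boolean enabled = client.getArchivingEnabled(Bytes.toBytes(tableName));
        System.out.println("HFile archiving for " + tableName + ": "
            + (enabled ? "enabled" : "disabled"));
      }
    }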
Lines changed: 1 addition & 6 deletions
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hbase.client.example;
+package org.apache.hadoop.hbase.client;

 import static org.apache.hadoop.hbase.util.FutureUtils.addListener;

@@ -28,11 +28,6 @@
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.AsyncConnection;
-import org.apache.hadoop.hbase.client.AsyncTable;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.util.Tool;
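This file keeps the static import of FutureUtils.addListener while dropping the now-local client imports. As a hedged aside, addListener attaches a (result, error) callback to a CompletableFuture; the sketch below assumes the two-argument signature commonly used in HBase and should be checked against your version:

    import static org.apache.hadoop.hbase.util.FutureUtils.addListener;

    import java.util.concurrent.CompletableFuture;

    // Sketch of the callback style enabled by the retained static import.
    final class ListenerSketch {
      static void printWhenDone(CompletableFuture<String> future) {
        addListener(future, (value, error) -> {
          if (error != null) {
            error.printStackTrace(); // the future failed
          } else {
            System.out.println("got " + value);
          }
        });
      }
    }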
Lines changed: 1 addition & 7 deletions
@@ -16,7 +16,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hbase.client.example;
+package org.apache.hadoop.hbase.client;

 import java.io.IOException;
 import java.util.ArrayList;
@@ -30,12 +30,6 @@
 import java.util.concurrent.TimeoutException;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.BufferedMutator;
-import org.apache.hadoop.hbase.client.BufferedMutatorParams;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
Lines changed: 1 addition & 10 deletions
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hbase.client.example;
+package org.apache.hadoop.hbase.client;

 import java.util.ArrayList;
 import java.util.List;
@@ -24,15 +24,6 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.coprocessor.Export;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
Lines changed: 1 addition & 5 deletions
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hbase.client.example;
+package org.apache.hadoop.hbase.client;

 import static org.apache.hadoop.hbase.util.FutureUtils.addListener;

@@ -26,10 +26,6 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.AsyncConnection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.ipc.NettyRpcClientConfigHelper;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
