Skip to content

Commit 5598fe4

Browse files
DieterDP-ng authored
and ndimiduk committed
HBASE-28568 Incremental backup set does not correctly shrink (#5876)
The incremental backup set is the set of tables included when an incremental backup is created, it is managed per backup root dir and contains all tables that are present in at least one backup (in that root dir). The incremental backup set can only shrink when backups are deleted. However, the implementation was incorrect, causing this set to never be able to shrink. Reviewed-by: Ray Mattingly <rmdmattingly@gmail.com> Signed-off-by: Nick Dimiduk <ndimiduk@apache.org>
1 parent 6af1823 commit 5598fe4

File tree

3 files changed

+58
-38
lines changed

3 files changed

+58
-38
lines changed

hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java

Lines changed: 16 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -94,7 +94,6 @@ public BackupInfo getBackupInfo(String backupId) throws IOException {
9494
public int deleteBackups(String[] backupIds) throws IOException {
9595

9696
int totalDeleted = 0;
97-
Map<String, HashSet<TableName>> allTablesMap = new HashMap<>();
9897

9998
boolean deleteSessionStarted;
10099
boolean snapshotDone;
@@ -130,20 +129,16 @@ public int deleteBackups(String[] backupIds) throws IOException {
130129
}
131130
snapshotDone = true;
132131
try {
132+
List<String> affectedBackupRootDirs = new ArrayList<>();
133133
for (int i = 0; i < backupIds.length; i++) {
134134
BackupInfo info = sysTable.readBackupInfo(backupIds[i]);
135-
if (info != null) {
136-
String rootDir = info.getBackupRootDir();
137-
HashSet<TableName> allTables = allTablesMap.get(rootDir);
138-
if (allTables == null) {
139-
allTables = new HashSet<>();
140-
allTablesMap.put(rootDir, allTables);
141-
}
142-
allTables.addAll(info.getTableNames());
143-
totalDeleted += deleteBackup(backupIds[i], sysTable);
135+
if (info == null) {
136+
continue;
144137
}
138+
affectedBackupRootDirs.add(info.getBackupRootDir());
139+
totalDeleted += deleteBackup(backupIds[i], sysTable);
145140
}
146-
finalizeDelete(allTablesMap, sysTable);
141+
finalizeDelete(affectedBackupRootDirs, sysTable);
147142
// Finish
148143
sysTable.finishDeleteOperation();
149144
// delete snapshot
@@ -176,26 +171,23 @@ public int deleteBackups(String[] backupIds) throws IOException {
176171

177172
/**
178173
* Updates incremental backup set for every backupRoot
179-
* @param tablesMap map [backupRoot: {@code Set<TableName>}]
180-
* @param table backup system table
174+
* @param backupRoots backupRoots for which to revise the incremental backup set
175+
* @param table backup system table
181176
* @throws IOException if a table operation fails
182177
*/
183-
private void finalizeDelete(Map<String, HashSet<TableName>> tablesMap, BackupSystemTable table)
178+
private void finalizeDelete(List<String> backupRoots, BackupSystemTable table)
184179
throws IOException {
185-
for (String backupRoot : tablesMap.keySet()) {
180+
for (String backupRoot : backupRoots) {
186181
Set<TableName> incrTableSet = table.getIncrementalBackupTableSet(backupRoot);
187-
Map<TableName, ArrayList<BackupInfo>> tableMap =
182+
Map<TableName, List<BackupInfo>> tableMap =
188183
table.getBackupHistoryForTableSet(incrTableSet, backupRoot);
189-
for (Map.Entry<TableName, ArrayList<BackupInfo>> entry : tableMap.entrySet()) {
190-
if (entry.getValue() == null) {
191-
// No more backups for a table
192-
incrTableSet.remove(entry.getKey());
193-
}
194-
}
184+
185+
// Keep only the tables that are present in other backups
186+
incrTableSet.retainAll(tableMap.keySet());
187+
188+
table.deleteIncrementalBackupTableSet(backupRoot);
195189
if (!incrTableSet.isEmpty()) {
196190
table.addIncrementalBackupTableSet(incrTableSet, backupRoot);
197-
} else { // empty
198-
table.deleteIncrementalBackupTableSet(backupRoot);
199191
}
200192
}
201193
}

hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java

Lines changed: 17 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -86,8 +86,9 @@
8686
* <ul>
8787
* <li>1. Backup sessions rowkey= "session:"+backupId; value =serialized BackupInfo</li>
8888
* <li>2. Backup start code rowkey = "startcode:"+backupRoot; value = startcode</li>
89-
* <li>3. Incremental backup set rowkey="incrbackupset:"+backupRoot; value=[list of tables]</li>
90-
* <li>4. Table-RS-timestamp map rowkey="trslm:"+backupRoot+table_name; value = map[RS-%3E last WAL
89+
* <li>3. Incremental backup set rowkey="incrbackupset:"+backupRoot; table="meta:"+tablename of
90+
* include table; value=empty</li>
91+
* <li>4. Table-RS-timestamp map rowkey="trslm:"+backupRoot+table_name; value = map[RS-> last WAL
9192
* timestamp]</li>
9293
* <li>5. RS - WAL ts map rowkey="rslogts:"+backupRoot +server; value = last WAL timestamp</li>
9394
* <li>6. WALs recorded rowkey="wals:"+WAL unique file name; value = backupId and full WAL file
@@ -839,23 +840,25 @@ public List<BackupInfo> getBackupHistoryForTable(TableName name) throws IOExcept
839840
return tableHistory;
840841
}
841842

842-
public Map<TableName, ArrayList<BackupInfo>> getBackupHistoryForTableSet(Set<TableName> set,
843+
/**
844+
* Goes through all backup history corresponding to the provided root folder, and collects all
845+
* backup info mentioning each of the provided tables.
846+
* @param set the tables for which to collect the {@code BackupInfo}
847+
* @param backupRoot backup destination path to retrieve backup history for
848+
* @return a map containing (a subset of) the provided {@code TableName}s, mapped to a list of at
849+
* least one {@code BackupInfo}
850+
* @throws IOException if getting the backup history fails
851+
*/
852+
public Map<TableName, List<BackupInfo>> getBackupHistoryForTableSet(Set<TableName> set,
843853
String backupRoot) throws IOException {
844854
List<BackupInfo> history = getBackupHistory(backupRoot);
845-
Map<TableName, ArrayList<BackupInfo>> tableHistoryMap = new HashMap<>();
846-
for (Iterator<BackupInfo> iterator = history.iterator(); iterator.hasNext();) {
847-
BackupInfo info = iterator.next();
848-
if (!backupRoot.equals(info.getBackupRootDir())) {
849-
continue;
850-
}
855+
Map<TableName, List<BackupInfo>> tableHistoryMap = new HashMap<>();
856+
for (BackupInfo info : history) {
851857
List<TableName> tables = info.getTableNames();
852858
for (TableName tableName : tables) {
853859
if (set.contains(tableName)) {
854-
ArrayList<BackupInfo> list = tableHistoryMap.get(tableName);
855-
if (list == null) {
856-
list = new ArrayList<>();
857-
tableHistoryMap.put(tableName, list);
858-
}
860+
List<BackupInfo> list =
861+
tableHistoryMap.computeIfAbsent(tableName, k -> new ArrayList<>());
859862
list.add(info);
860863
}
861864
}

hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
1717
*/
1818
package org.apache.hadoop.hbase.backup;
1919

20+
import static org.junit.Assert.assertEquals;
2021
import static org.junit.Assert.assertTrue;
2122

2223
import java.io.ByteArrayOutputStream;
@@ -30,6 +31,7 @@
3031
import org.apache.hadoop.hbase.testclassification.LargeTests;
3132
import org.apache.hadoop.hbase.util.EnvironmentEdge;
3233
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
34+
import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
3335
import org.apache.hadoop.util.ToolRunner;
3436
import org.junit.Assert;
3537
import org.junit.ClassRule;
@@ -158,4 +160,27 @@ public long currentTime() {
158160
LOG.info(baos.toString());
159161
assertTrue(output.indexOf("Deleted 1 backups") >= 0);
160162
}
163+
164+
/**
165+
* Verify that backup deletion updates the incremental-backup-set.
166+
*/
167+
@Test
168+
public void testBackupDeleteUpdatesIncrementalBackupSet() throws Exception {
169+
LOG.info("Test backup delete updates the incremental backup set");
170+
BackupSystemTable backupSystemTable = new BackupSystemTable(TEST_UTIL.getConnection());
171+
172+
String backupId1 = fullTableBackup(Lists.newArrayList(table1, table2));
173+
assertTrue(checkSucceeded(backupId1));
174+
assertEquals(Sets.newHashSet(table1, table2),
175+
backupSystemTable.getIncrementalBackupTableSet(BACKUP_ROOT_DIR));
176+
177+
String backupId2 = fullTableBackup(Lists.newArrayList(table3));
178+
assertTrue(checkSucceeded(backupId2));
179+
assertEquals(Sets.newHashSet(table1, table2, table3),
180+
backupSystemTable.getIncrementalBackupTableSet(BACKUP_ROOT_DIR));
181+
182+
getBackupAdmin().deleteBackups(new String[] { backupId1 });
183+
assertEquals(Sets.newHashSet(table3),
184+
backupSystemTable.getIncrementalBackupTableSet(BACKUP_ROOT_DIR));
185+
}
161186
}

0 commit comments

Comments (0)