Commit 24f48b0

Duo Zhang (Apache9) authored and committed

HBASE-26224 Introduce a MigrationStoreFileTracker to support migrating from different store file tracker implementations (#3656)

Signed-off-by: Wellington Chevreuil <wchevreuil@apache.org>

1 parent bd924ab · commit 24f48b0

7 files changed: +343 additions, −20 deletions
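
As orientation before the per-file diffs: a minimal, hypothetical sketch of how the new migration tracker could be wired up through configuration. The `hbase.store.file-tracker.migration.src.impl` and `hbase.store.file-tracker.migration.dst.impl` keys and the class names come from the diffs below; `StoreFileTrackerFactory.TRACK_IMPL` is assumed to be a publicly accessible constant naming the active-tracker key, and `TrackerMigrationConfigSketch` / `buildMigrationConf` are illustrative helpers, not part of this commit.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;

public final class TrackerMigrationConfigSketch {

  // Point the active tracker at MigrationStoreFileTracker and tell it which
  // implementations to bridge between (here: default -> file based).
  public static Configuration buildMigrationConf(Configuration base) {
    Configuration conf = new Configuration(base);
    // Assumption: TRACK_IMPL is the config key that selects the active tracker.
    conf.set(StoreFileTrackerFactory.TRACK_IMPL,
      "org.apache.hadoop.hbase.regionserver.storefiletracker.MigrationStoreFileTracker");
    // The two keys below are the SRC_IMPL / DST_IMPL literals defined in
    // MigrationStoreFileTracker in this commit.
    conf.set("hbase.store.file-tracker.migration.src.impl",
      "org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker");
    conf.set("hbase.store.file-tracker.migration.dst.impl",
      "org.apache.hadoop.hbase.regionserver.storefiletracker.FileBasedStoreFileTracker");
    return conf;
  }
}
```

Once the destination tracker has mirrored the store file list and become authoritative, the expectation (not covered by this commit) would be to switch the active tracker to the destination implementation directly.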

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/DefaultStoreFileTracker.java

Lines changed: 9 additions & 1 deletion

```diff
@@ -19,6 +19,7 @@
 
 import java.io.IOException;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
 import org.apache.hadoop.conf.Configuration;
 
@@ -39,7 +40,9 @@ public DefaultStoreFileTracker(Configuration conf, boolean isPrimaryReplica, Sto
 
   @Override
   public List<StoreFileInfo> load() throws IOException {
-    return ctx.getRegionFileSystem().getStoreFiles(ctx.getFamily().getNameAsString());
+    List<StoreFileInfo> files =
+      ctx.getRegionFileSystem().getStoreFiles(ctx.getFamily().getNameAsString());
+    return files != null ? files : Collections.emptyList();
   }
 
   @Override
@@ -57,4 +60,9 @@ protected void doAddCompactionResults(Collection<StoreFileInfo> compactedFiles,
     Collection<StoreFileInfo> newFiles) throws IOException {
     // NOOP
   }
+
+  @Override
+  void set(List<StoreFileInfo> files) {
+    // NOOP
+  }
 }
```

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FileBasedStoreFileTracker.java

Lines changed: 14 additions & 1 deletion

```diff
@@ -48,7 +48,7 @@
  * storages.
  */
 @InterfaceAudience.Private
-public class FileBasedStoreFileTracker extends StoreFileTrackerBase {
+class FileBasedStoreFileTracker extends StoreFileTrackerBase {
 
   private final StoreFileListFile backedFile;
 
@@ -139,4 +139,17 @@ protected void doAddCompactionResults(Collection<StoreFileInfo> compactedFiles,
       }
     }
   }
+
+  @Override
+  void set(List<StoreFileInfo> files) throws IOException {
+    synchronized (storefiles) {
+      storefiles.clear();
+      StoreFileList.Builder builder = StoreFileList.newBuilder();
+      for (StoreFileInfo info : files) {
+        storefiles.put(info.getPath().getName(), info);
+        builder.addStoreFile(toStoreFileEntry(info));
+      }
+      backedFile.update(builder);
+    }
+  }
 }
```
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrationStoreFileTracker.java

Lines changed: 88 additions & 0 deletions (new file)

```java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.storefiletracker;

import java.io.IOException;
import java.util.Collection;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.regionserver.StoreContext;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.yetus.audience.InterfaceAudience;

import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;

/**
 * A store file tracker used for migrating between store file tracker implementations.
 */
@InterfaceAudience.Private
class MigrationStoreFileTracker extends StoreFileTrackerBase {

  public static final String SRC_IMPL = "hbase.store.file-tracker.migration.src.impl";

  public static final String DST_IMPL = "hbase.store.file-tracker.migration.dst.impl";

  private final StoreFileTrackerBase src;

  private final StoreFileTrackerBase dst;

  public MigrationStoreFileTracker(Configuration conf, boolean isPrimaryReplica, StoreContext ctx) {
    super(conf, isPrimaryReplica, ctx);
    this.src = StoreFileTrackerFactory.create(conf, SRC_IMPL, isPrimaryReplica, ctx);
    this.dst = StoreFileTrackerFactory.create(conf, DST_IMPL, isPrimaryReplica, ctx);
    Preconditions.checkArgument(!src.getClass().equals(dst.getClass()),
      "src and dst is the same: %s", src.getClass());
  }

  @Override
  public List<StoreFileInfo> load() throws IOException {
    List<StoreFileInfo> files = src.load();
    dst.set(files);
    return files;
  }

  @Override
  protected boolean requireWritingToTmpDirFirst() {
    // Returns true if either of the two StoreFileTracker returns true.
    // For example, if we want to migrate from a tracker implementation which can ignore the broken
    // files under data directory to a tracker implementation which can not, if we still allow
    // writing in tmp directory directly, we may have some broken files under the data directory and
    // then after we finally change the implementation which can not ignore the broken files, we
    // will be in trouble.
    return src.requireWritingToTmpDirFirst() || dst.requireWritingToTmpDirFirst();
  }

  @Override
  protected void doAddNewStoreFiles(Collection<StoreFileInfo> newFiles) throws IOException {
    src.doAddNewStoreFiles(newFiles);
    dst.doAddNewStoreFiles(newFiles);
  }

  @Override
  protected void doAddCompactionResults(Collection<StoreFileInfo> compactedFiles,
    Collection<StoreFileInfo> newFiles) throws IOException {
    src.doAddCompactionResults(compactedFiles, newFiles);
    dst.doAddCompactionResults(compactedFiles, newFiles);
  }

  @Override
  void set(List<StoreFileInfo> files) {
    throw new UnsupportedOperationException(
      "Should not call this method on " + getClass().getSimpleName());
  }
}
```

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java

Lines changed: 4 additions & 2 deletions

```diff
@@ -29,7 +29,6 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
 import org.apache.hbase.thirdparty.com.google.common.io.ByteStreams;
 import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException;
 
@@ -121,7 +120,10 @@ StoreFileList load() throws IOException {
    * We will set the timestamp in this method so just pass the builder in
    */
   void update(StoreFileList.Builder builder) throws IOException {
-    Preconditions.checkState(nextTrackFile >= 0, "should call load first before calling update");
+    if (nextTrackFile < 0) {
+      // we need to call load first to load the prevTimestamp and also the next file
+      load();
+    }
     FileSystem fs = ctx.getRegionFileSystem().getFileSystem();
     long timestamp = Math.max(prevTimestamp + 1, EnvironmentEdgeManager.currentTime());
     try (FSDataOutputStream out = fs.create(trackFiles[nextTrackFile], true)) {
```
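
Reading only from this diff, the reason `update()` no longer insists on a prior `load()` appears to be the new mirroring path: `MigrationStoreFileTracker.load()` pushes the freshly loaded list into the destination tracker via `set()`, and for a file-based destination that reaches `StoreFileListFile.update()` before anyone has loaded the backing file. A rough call trace (step numbering is illustrative only):

```java
// 1. MigrationStoreFileTracker.load()
//      -> src.load()                  reads the current store file list from the source tracker
//      -> dst.set(files)              mirrors that list into the destination tracker
// 2. FileBasedStoreFileTracker.set(files)
//      -> backedFile.update(builder)  persists the mirrored list
// 3. StoreFileListFile.update(builder)
//      -> nextTrackFile < 0 on first use, so it now calls load() itself
//         instead of failing the removed Preconditions.checkState(...) check
```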

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java

Lines changed: 10 additions & 2 deletions

```diff
@@ -19,6 +19,7 @@
 
 import java.io.IOException;
 import java.util.Collection;
+import java.util.List;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
@@ -95,8 +96,7 @@ private HFileContext createFileContext(Compression.Algorithm compression,
   }
 
   @Override
-  public final StoreFileWriter createWriter(CreateStoreFileWriterParams params)
-    throws IOException {
+  public final StoreFileWriter createWriter(CreateStoreFileWriterParams params) throws IOException {
     if (!isPrimaryReplica) {
       throw new IllegalStateException("Should not call create writer on secondary replicas");
     }
@@ -170,4 +170,12 @@ public final StoreFileWriter createWriter(CreateStoreFileWriterParams params)
 
   protected abstract void doAddCompactionResults(Collection<StoreFileInfo> compactedFiles,
     Collection<StoreFileInfo> newFiles) throws IOException;
+
+  /**
+   * used to mirror the store file list after loading when migration.
+   * <p/>
+   * Do not add this method to the {@link StoreFileTracker} interface since we do not need this
+   * method in upper layer.
+   */
+  abstract void set(List<StoreFileInfo> files) throws IOException;
 }
```

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerFactory.java

Lines changed: 25 additions & 14 deletions

```diff
@@ -30,6 +30,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+
 /**
  * Factory method for creating store file tracker.
  */
@@ -39,30 +41,39 @@ public final class StoreFileTrackerFactory {
   private static final Logger LOG = LoggerFactory.getLogger(StoreFileTrackerFactory.class);
 
   public static StoreFileTracker create(Configuration conf, boolean isPrimaryReplica,
-      StoreContext ctx) {
+    StoreContext ctx) {
     Class<? extends StoreFileTracker> tracker =
       conf.getClass(TRACK_IMPL, DefaultStoreFileTracker.class, StoreFileTracker.class);
     LOG.info("instantiating StoreFileTracker impl {}", tracker.getName());
     return ReflectionUtils.newInstance(tracker, conf, isPrimaryReplica, ctx);
   }
 
   public static StoreFileTracker create(Configuration conf, boolean isPrimaryReplica, String family,
-      HRegionFileSystem regionFs) {
+    HRegionFileSystem regionFs) {
     ColumnFamilyDescriptorBuilder fDescBuilder =
       ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family));
-    StoreContext ctx = StoreContext.getBuilder().
-      withColumnFamilyDescriptor(fDescBuilder.build()).
-      withRegionFileSystem(regionFs).
-      build();
-    return StoreFileTrackerFactory.create(conf, isPrimaryReplica, ctx);
+    StoreContext ctx = StoreContext.getBuilder().withColumnFamilyDescriptor(fDescBuilder.build())
+      .withRegionFileSystem(regionFs).build();
+    return StoreFileTrackerFactory.create(conf, TRACK_IMPL, isPrimaryReplica, ctx);
+  }
+
+  public static Configuration mergeConfigurations(Configuration global, TableDescriptor table,
+    ColumnFamilyDescriptor family) {
+    return new CompoundConfiguration().add(global).addBytesMap(table.getValues())
+      .addStringMap(family.getConfiguration()).addBytesMap(family.getValues());
   }
 
-  public static Configuration mergeConfigurations(Configuration global,
-    TableDescriptor table, ColumnFamilyDescriptor family) {
-    return new CompoundConfiguration()
-      .add(global)
-      .addBytesMap(table.getValues())
-      .addStringMap(family.getConfiguration())
-      .addBytesMap(family.getValues());
+  static StoreFileTrackerBase create(Configuration conf, String configName,
+    boolean isPrimaryReplica, StoreContext ctx) {
+    String className =
+      Preconditions.checkNotNull(conf.get(configName), "config %s is not set", configName);
+    Class<? extends StoreFileTrackerBase> tracker;
+    try {
+      tracker = Class.forName(className).asSubclass(StoreFileTrackerBase.class);
+    } catch (ClassNotFoundException e) {
+      throw new RuntimeException(e);
+    }
+    LOG.info("instantiating StoreFileTracker impl {} as {}", tracker.getName(), configName);
+    return ReflectionUtils.newInstance(tracker, conf, isPrimaryReplica, ctx);
   }
 }
```
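
The reshuffled `mergeConfigurations` helper layers table-level and family-level settings over the global configuration via `CompoundConfiguration`. A minimal usage sketch, assuming the usual precedence (family over table over global, since `CompoundConfiguration` consults the most recently added source first); the key `some.key` and the `MergeConfigSketch` class are illustrative only, while the descriptor builders and `mergeConfigurations` itself are taken from the HBase API and this diff:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
import org.apache.hadoop.hbase.util.Bytes;

public final class MergeConfigSketch {

  public static void main(String[] args) {
    Configuration global = new Configuration();
    global.set("some.key", "global");

    // Table-level override, stored in the table descriptor's values map.
    TableDescriptor table = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
      .setValue("some.key", "table")
      .build();

    // Family-level override, stored in the column family descriptor's values map.
    ColumnFamilyDescriptor family = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
      .setValue(Bytes.toBytes("some.key"), Bytes.toBytes("family"))
      .build();

    Configuration merged = StoreFileTrackerFactory.mergeConfigurations(global, table, family);
    // The per-family value should win here, since it was added last.
    System.out.println(merged.get("some.key"));
  }
}
```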
