Skip to content

Commit 99ba978

Browse files
Apache9Duo Zhang
authored and committed
HBASE-26064 Introduce a StoreFileTracker to abstract the store file tracking logic
Signed-off-by: Wellington Chevreuil <wchevreuil@apache.org>
1 parent 2ce2f93 commit 99ba978

36 files changed

+1224
-705
lines changed

hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java

Lines changed: 10 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,6 @@
2929
import java.util.List;
3030
import java.util.Map.Entry;
3131
import java.util.Optional;
32-
3332
import org.apache.hadoop.conf.Configuration;
3433
import org.apache.hadoop.fs.FileStatus;
3534
import org.apache.hadoop.fs.FileSystem;
@@ -144,17 +143,16 @@ public InternalScanner createScanner(ScanInfo scanInfo, List<StoreFileScanner> s
144143
};
145144

146145
private final CellSinkFactory<StoreFileWriter> writerFactory =
147-
new CellSinkFactory<StoreFileWriter>() {
148-
@Override
149-
public StoreFileWriter createWriter(InternalScanner scanner,
150-
org.apache.hadoop.hbase.regionserver.compactions.Compactor.FileDetails fd,
151-
boolean shouldDropBehind, boolean major) throws IOException {
152-
// make this writer with tags always because of possible new cells with tags.
153-
return store.createWriterInTmp(fd.maxKeyCount,
154-
major ? majorCompactionCompression : minorCompactionCompression,
155-
true, true, true, shouldDropBehind);
156-
}
157-
};
146+
new CellSinkFactory<StoreFileWriter>() {
147+
@Override
148+
public StoreFileWriter createWriter(InternalScanner scanner,
149+
org.apache.hadoop.hbase.regionserver.compactions.Compactor.FileDetails fd,
150+
boolean shouldDropBehind, boolean major) throws IOException {
151+
// make this writer with tags always because of possible new cells with tags.
152+
return store.getStoreEngine().createWriter(
153+
createParams(fd, shouldDropBehind, major).includeMVCCReadpoint(true).includesTag(true));
154+
}
155+
};
158156

159157
public DefaultMobStoreCompactor(Configuration conf, HStore store) {
160158
super(conf, store);

hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,6 @@
2525
import java.util.HashSet;
2626
import java.util.List;
2727
import java.util.Set;
28-
2928
import org.apache.hadoop.conf.Configuration;
3029
import org.apache.hadoop.fs.Path;
3130
import org.apache.hadoop.hbase.Cell;
@@ -127,8 +126,7 @@ public List<Path> flushSnapshot(MemStoreSnapshot snapshot, long cacheFlushId,
127126
synchronized (flushLock) {
128127
status.setStatus("Flushing " + store + ": creating writer");
129128
// Write the map out to the disk
130-
writer = store.createWriterInTmp(cellsCount, store.getColumnFamilyDescriptor().getCompressionType(),
131-
false, true, true, false);
129+
writer = createWriter(snapshot, true);
132130
IOException e = null;
133131
try {
134132
// It's a mob store, flush the cells in a mob way. This is the difference of flushing
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CreateStoreFileWriterParams.java

Lines changed: 134 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,134 @@
1+
/**
2+
* Licensed to the Apache Software Foundation (ASF) under one
3+
* or more contributor license agreements. See the NOTICE file
4+
* distributed with this work for additional information
5+
* regarding copyright ownership. The ASF licenses this file
6+
* to you under the Apache License, Version 2.0 (the
7+
* "License"); you may not use this file except in compliance
8+
* with the License. You may obtain a copy of the License at
9+
*
10+
* http://www.apache.org/licenses/LICENSE-2.0
11+
*
12+
* Unless required by applicable law or agreed to in writing, software
13+
* distributed under the License is distributed on an "AS IS" BASIS,
14+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15+
* See the License for the specific language governing permissions and
16+
* limitations under the License.
17+
*/
18+
package org.apache.hadoop.hbase.regionserver;
19+
20+
import org.apache.hadoop.hbase.HConstants;
21+
import org.apache.hadoop.hbase.io.compress.Compression;
22+
import org.apache.yetus.audience.InterfaceAudience;
23+
24+
@InterfaceAudience.Private
25+
public final class CreateStoreFileWriterParams {
26+
27+
private long maxKeyCount;
28+
29+
private Compression.Algorithm compression;
30+
31+
private boolean isCompaction;
32+
33+
private boolean includeMVCCReadpoint;
34+
35+
private boolean includesTag;
36+
37+
private boolean shouldDropBehind;
38+
39+
private long totalCompactedFilesSize = -1;
40+
41+
private String fileStoragePolicy = HConstants.EMPTY_STRING;
42+
43+
private CreateStoreFileWriterParams() {
44+
}
45+
46+
public long maxKeyCount() {
47+
return maxKeyCount;
48+
}
49+
50+
public CreateStoreFileWriterParams maxKeyCount(long maxKeyCount) {
51+
this.maxKeyCount = maxKeyCount;
52+
return this;
53+
}
54+
55+
public Compression.Algorithm compression() {
56+
return compression;
57+
}
58+
59+
/**
60+
* Set the compression algorithm to use
61+
*/
62+
public CreateStoreFileWriterParams compression(Compression.Algorithm compression) {
63+
this.compression = compression;
64+
return this;
65+
}
66+
67+
public boolean isCompaction() {
68+
return isCompaction;
69+
}
70+
71+
/**
72+
* Whether we are creating a new file in a compaction
73+
*/
74+
public CreateStoreFileWriterParams isCompaction(boolean isCompaction) {
75+
this.isCompaction = isCompaction;
76+
return this;
77+
}
78+
79+
public boolean includeMVCCReadpoint() {
80+
return includeMVCCReadpoint;
81+
}
82+
83+
/**
84+
* Whether to include MVCC or not
85+
*/
86+
public CreateStoreFileWriterParams includeMVCCReadpoint(boolean includeMVCCReadpoint) {
87+
this.includeMVCCReadpoint = includeMVCCReadpoint;
88+
return this;
89+
}
90+
91+
public boolean includesTag() {
92+
return includesTag;
93+
}
94+
95+
/**
96+
* Whether to includesTag or not
97+
*/
98+
public CreateStoreFileWriterParams includesTag(boolean includesTag) {
99+
this.includesTag = includesTag;
100+
return this;
101+
}
102+
103+
public boolean shouldDropBehind() {
104+
return shouldDropBehind;
105+
}
106+
107+
public CreateStoreFileWriterParams shouldDropBehind(boolean shouldDropBehind) {
108+
this.shouldDropBehind = shouldDropBehind;
109+
return this;
110+
}
111+
112+
public long totalCompactedFilesSize() {
113+
return totalCompactedFilesSize;
114+
}
115+
116+
public CreateStoreFileWriterParams totalCompactedFilesSize(long totalCompactedFilesSize) {
117+
this.totalCompactedFilesSize = totalCompactedFilesSize;
118+
return this;
119+
}
120+
121+
public String fileStoragePolicy() {
122+
return fileStoragePolicy;
123+
}
124+
125+
public CreateStoreFileWriterParams fileStoragePolicy(String fileStoragePolicy) {
126+
this.fileStoragePolicy = fileStoragePolicy;
127+
return this;
128+
}
129+
130+
public static CreateStoreFileWriterParams create() {
131+
return new CreateStoreFileWriterParams();
132+
}
133+
134+
}

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -19,18 +19,17 @@
1919

2020
import java.io.IOException;
2121
import java.util.List;
22-
2322
import org.apache.hadoop.conf.Configuration;
2423
import org.apache.hadoop.fs.Path;
2524
import org.apache.hadoop.hbase.CellComparator;
26-
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl;
27-
import org.apache.yetus.audience.InterfaceAudience;
2825
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
26+
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl;
2927
import org.apache.hadoop.hbase.regionserver.compactions.DateTieredCompactionPolicy;
3028
import org.apache.hadoop.hbase.regionserver.compactions.DateTieredCompactionRequest;
3129
import org.apache.hadoop.hbase.regionserver.compactions.DateTieredCompactor;
3230
import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
3331
import org.apache.hadoop.hbase.security.User;
32+
import org.apache.yetus.audience.InterfaceAudience;
3433

3534
/**
3635
* HBASE-15400 This store engine allows us to store data in date tiered layout with exponential

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,6 @@
2020

2121
import java.io.IOException;
2222
import java.util.List;
23-
2423
import org.apache.hadoop.conf.Configuration;
2524
import org.apache.hadoop.fs.Path;
2625
import org.apache.hadoop.hbase.CellComparator;
@@ -39,8 +38,8 @@
3938
* their derivatives.
4039
*/
4140
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
42-
public class DefaultStoreEngine extends StoreEngine<
43-
DefaultStoreFlusher, RatioBasedCompactionPolicy, DefaultCompactor, DefaultStoreFileManager> {
41+
public class DefaultStoreEngine extends StoreEngine<DefaultStoreFlusher,
42+
RatioBasedCompactionPolicy, DefaultCompactor, DefaultStoreFileManager> {
4443

4544
public static final String DEFAULT_STORE_FLUSHER_CLASS_KEY =
4645
"hbase.hstore.defaultengine.storeflusher.class";

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java

Lines changed: 4 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -21,15 +21,14 @@
2121
import java.io.IOException;
2222
import java.util.ArrayList;
2323
import java.util.List;
24-
25-
import org.apache.yetus.audience.InterfaceAudience;
26-
import org.slf4j.Logger;
27-
import org.slf4j.LoggerFactory;
2824
import org.apache.hadoop.conf.Configuration;
2925
import org.apache.hadoop.fs.Path;
3026
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
3127
import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
3228
import org.apache.hadoop.util.StringUtils;
29+
import org.apache.yetus.audience.InterfaceAudience;
30+
import org.slf4j.Logger;
31+
import org.slf4j.LoggerFactory;
3332

3433
/**
3534
* Default implementation of StoreFlusher.
@@ -60,9 +59,7 @@ public List<Path> flushSnapshot(MemStoreSnapshot snapshot, long cacheFlushId,
6059
synchronized (flushLock) {
6160
status.setStatus("Flushing " + store + ": creating writer");
6261
// Write the map out to the disk
63-
writer = store.createWriterInTmp(cellsCount,
64-
store.getColumnFamilyDescriptor().getCompressionType(), false, true,
65-
snapshot.isTagsPresent(), false);
62+
writer = createWriter(snapshot, false);
6663
IOException e = null;
6764
try {
6865
performFlush(scanner, writer, throughputController);

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,6 @@
2828
import java.util.UUID;
2929
import java.util.concurrent.ConcurrentHashMap;
3030
import java.util.concurrent.atomic.AtomicLong;
31-
3231
import org.apache.hadoop.conf.Configuration;
3332
import org.apache.hadoop.fs.FileSystem;
3433
import org.apache.hadoop.fs.Path;
@@ -158,7 +157,7 @@ protected KeyValueScanner createScanner(Scan scan, ScanInfo scanInfo,
158157
protected StoreEngine<?, ?, ?, ?> createStoreEngine(HStore store, Configuration conf,
159158
CellComparator cellComparator) throws IOException {
160159
MobStoreEngine engine = new MobStoreEngine();
161-
engine.createComponents(conf, store, cellComparator);
160+
engine.createComponentsOnce(conf, store, cellComparator);
162161
return engine;
163162
}
164163

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java

Lines changed: 3 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -142,7 +142,7 @@ public Path getRegionDir() {
142142
// Temp Helpers
143143
// ===========================================================================
144144
/** @return {@link Path} to the region's temp directory, used for file creations */
145-
Path getTempDir() {
145+
public Path getTempDir() {
146146
return new Path(getRegionDir(), REGION_TEMP_DIR);
147147
}
148148

@@ -237,11 +237,7 @@ public String getStoragePolicyName(String familyName) {
237237
* @param familyName Column Family Name
238238
* @return a set of {@link StoreFileInfo} for the specified family.
239239
*/
240-
public Collection<StoreFileInfo> getStoreFiles(final byte[] familyName) throws IOException {
241-
return getStoreFiles(Bytes.toString(familyName));
242-
}
243-
244-
public Collection<StoreFileInfo> getStoreFiles(final String familyName) throws IOException {
240+
public List<StoreFileInfo> getStoreFiles(final String familyName) throws IOException {
245241
return getStoreFiles(familyName, true);
246242
}
247243

@@ -251,7 +247,7 @@ public Collection<StoreFileInfo> getStoreFiles(final String familyName) throws I
251247
* @param familyName Column Family Name
252248
* @return a set of {@link StoreFileInfo} for the specified family.
253249
*/
254-
public Collection<StoreFileInfo> getStoreFiles(final String familyName, final boolean validate)
250+
public List<StoreFileInfo> getStoreFiles(final String familyName, final boolean validate)
255251
throws IOException {
256252
Path familyDir = getStoreDir(familyName);
257253
FileStatus[] files = CommonFSUtils.listStatus(this.fs, familyDir);

0 commit comments

Comments
 (0)