/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CompoundConfiguration;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
import org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Used for master to sanity check {@link org.apache.hadoop.hbase.client.TableDescriptor}.
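 * <p>
 * A minimal usage sketch; the call site below is illustrative, not part of this patch:
 * <pre>
 *   // Throws DoNotRetryIOException on a failed check unless sanity checks are disabled
 *   // via hbase.table.sanity.checks in the configuration or the table descriptor.
 *   TableDescriptorChecker.sanityCheck(conf, tableDescriptor);
 * </pre>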
 */
@InterfaceAudience.Private
public class TableDescriptorChecker {
  private static final Logger LOG = LoggerFactory.getLogger(TableDescriptorChecker.class);

  public static final String TABLE_SANITY_CHECKS = "hbase.table.sanity.checks";
  public static final boolean DEFAULT_TABLE_SANITY_CHECKS = true;

  /**
   * Checks whether the table conforms to some sane limits, and that configured values
   * (compression, encryption, etc.) can actually be used. Throws an exception if something
   * is wrong.
   */
  public static void sanityCheck(final Configuration conf, final TableDescriptor td)
      throws IOException {
    sanityCheck(conf, td,
      conf.getBoolean(HMaster.MASTER_CHECK_COMPRESSION, HMaster.DEFAULT_MASTER_CHECK_COMPRESSION),
      conf.getBoolean(HMaster.MASTER_CHECK_ENCRYPTION, HMaster.DEFAULT_MASTER_CHECK_ENCRYPTION));
  }

  /**
   * Checks whether the table conforms to some sane limits, and that configured values
   * (compression, encryption, etc.) can actually be used. Throws an exception if something
   * is wrong.
   */
  public static void sanityCheck(final Configuration conf, final TableDescriptor td,
      boolean checkCompression, boolean checkEncryption) throws IOException {
    boolean logWarn = false;
    if (!conf.getBoolean(TABLE_SANITY_CHECKS, DEFAULT_TABLE_SANITY_CHECKS)) {
      logWarn = true;
    }
    String tableVal = td.getValue(TABLE_SANITY_CHECKS);
    if (tableVal != null && !Boolean.parseBoolean(tableVal)) {
      logWarn = true;
    }
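
    // From here on, logWarn == true downgrades most failed checks to warnings via
    // warnOrThrowExceptionForFailure; the compaction policy check is the exception and
    // always throws even when sanity checks are disabled.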

    // check max file size
    long maxFileSizeLowerLimit = 2 * 1024 * 1024L; // 2M is the default lower limit
    long maxFileSize = td.getMaxFileSize();
    if (maxFileSize < 0) {
      maxFileSize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit);
    }
    if (maxFileSize < conf.getLong("hbase.hregion.max.filesize.limit", maxFileSizeLowerLimit)) {
      String message = "MAX_FILESIZE for table descriptor or \"hbase.hregion.max.filesize\" (" +
          maxFileSize + ") is too small, which might cause over-splitting into an unmanageable " +
          "number of regions.";
      warnOrThrowExceptionForFailure(logWarn, message, null);
    }

    // check flush size
    long flushSizeLowerLimit = 1024 * 1024L; // 1M is the default lower limit
    long flushSize = td.getMemStoreFlushSize();
    if (flushSize < 0) {
      flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit);
    }
    if (flushSize < conf.getLong("hbase.hregion.memstore.flush.size.limit", flushSizeLowerLimit)) {
      String message = "MEMSTORE_FLUSHSIZE for table descriptor or " +
          "\"hbase.hregion.memstore.flush.size\" (" + flushSize +
          ") is too small, which might cause very frequent flushing.";
      warnOrThrowExceptionForFailure(logWarn, message, null);
    }

    // check that coprocessors and other specified plugin classes can be loaded
    try {
      checkClassLoading(conf, td);
    } catch (Exception ex) {
      warnOrThrowExceptionForFailure(logWarn, ex.getMessage(), null);
    }

    if (checkCompression) {
      // check compression can be loaded
      try {
        checkCompression(td);
      } catch (IOException e) {
        warnOrThrowExceptionForFailure(logWarn, e.getMessage(), e);
      }
    }

    if (checkEncryption) {
      // check encryption can be loaded
      try {
        checkEncryption(conf, td);
      } catch (IOException e) {
        warnOrThrowExceptionForFailure(logWarn, e.getMessage(), e);
      }
    }

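    // Note: logWarn is deliberately not honored here; an invalid compaction policy setup
    // (e.g. a misconfigured FIFO compaction) always throws, even with sanity checks disabled.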
    // Verify compaction policy
    try {
      checkCompactionPolicy(conf, td);
    } catch (IOException e) {
      warnOrThrowExceptionForFailure(false, e.getMessage(), e);
    }

    // check that we have at least 1 CF
    if (td.getColumnFamilyCount() == 0) {
      String message = "Table should have at least one column family.";
      warnOrThrowExceptionForFailure(logWarn, message, null);
    }

    // check that we have at least 1 region replica
    int regionReplicas = td.getRegionReplication();
    if (regionReplicas < 1) {
      String message = "Table region replication should be at least one.";
      warnOrThrowExceptionForFailure(logWarn, message, null);
    }

    for (ColumnFamilyDescriptor hcd : td.getColumnFamilies()) {
      if (hcd.getTimeToLive() <= 0) {
        String message = "TTL for column family " + hcd.getNameAsString() + " must be positive.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }

      // check blockSize
      if (hcd.getBlocksize() < 1024 || hcd.getBlocksize() > 16 * 1024 * 1024) {
        String message = "Block size for column family " + hcd.getNameAsString() +
            " must be between 1 KB and 16 MB.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }

      // check versions
      if (hcd.getMinVersions() < 0) {
        String message =
            "Min versions for column family " + hcd.getNameAsString() + " must be non-negative.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }
      // max versions are already checked elsewhere

      // HBASE-13776 Setting illegal versions for ColumnFamilyDescriptor
      // does not throw IllegalArgumentException
      // check minVersions <= maxVersions
      if (hcd.getMinVersions() > hcd.getMaxVersions()) {
        String message = "Min versions for column family " + hcd.getNameAsString() +
            " must be less than or equal to the Max versions.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }

      // check replication scope
      checkReplicationScope(hcd);
      // check bloom filter type
      checkBloomFilterType(hcd);

      // check data replication factor; it can be 0 (the default) when the user has not
      // explicitly set it, in which case the file system's default replication factor is used.
      if (hcd.getDFSReplication() < 0) {
        String message = "HFile Replication for column family " + hcd.getNameAsString() +
            " must be non-negative.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
      }
    }
  }

  private static void checkReplicationScope(final ColumnFamilyDescriptor cfd) throws IOException {
    // check replication scope
    WALProtos.ScopeType scope = WALProtos.ScopeType.valueOf(cfd.getScope());
    if (scope == null) {
      String message = "Replication scope for column family " + cfd.getNameAsString() + " is " +
          cfd.getScope() + " which is invalid.";
      LOG.error(message);
      throw new DoNotRetryIOException(message);
    }
  }

  private static void checkCompactionPolicy(Configuration conf, TableDescriptor td)
      throws IOException {
    // FIFO compaction has some requirements; note that FIFOCompactionPolicy also ignores
    // periodic major compactions.
    String className = td.getValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY);
    if (className == null) {
      className = conf.get(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
        ExploringCompactionPolicy.class.getName());
    }

    int blockingFileCount = HStore.DEFAULT_BLOCKING_STOREFILE_COUNT;
    String sv = td.getValue(HStore.BLOCKING_STOREFILES_KEY);
    if (sv != null) {
      blockingFileCount = Integer.parseInt(sv);
    } else {
      blockingFileCount = conf.getInt(HStore.BLOCKING_STOREFILES_KEY, blockingFileCount);
    }
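
    // The table/site-level value above is only a default; a per-family setting of
    // HStore.BLOCKING_STOREFILES_KEY (checked in the loop below) takes precedence.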
    for (ColumnFamilyDescriptor hcd : td.getColumnFamilies()) {
      String compactionPolicy =
          hcd.getConfigurationValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY);
      if (compactionPolicy == null) {
        compactionPolicy = className;
      }
      if (!compactionPolicy.equals(FIFOCompactionPolicy.class.getName())) {
        continue;
      }
      // FIFOCompaction
      String message = null;

      // 1. Check TTL
      if (hcd.getTimeToLive() == ColumnFamilyDescriptorBuilder.DEFAULT_TTL) {
        message = "Default TTL is not supported for FIFO compaction";
        throw new IOException(message);
      }

      // 2. Check min versions
      if (hcd.getMinVersions() > 0) {
        message = "MIN_VERSION > 0 is not supported for FIFO compaction";
        throw new IOException(message);
      }

      // 3. Check blocking file count; use a per-family copy so that one family's override
      // does not leak into the check for the next family.
      int cfBlockingFileCount = blockingFileCount;
      sv = hcd.getConfigurationValue(HStore.BLOCKING_STOREFILES_KEY);
      if (sv != null) {
        cfBlockingFileCount = Integer.parseInt(sv);
      }
      if (cfBlockingFileCount < 1000) {
        message =
            "Blocking file count '" + HStore.BLOCKING_STOREFILES_KEY + "' " + cfBlockingFileCount +
                " is below recommended minimum of 1000 for column family " + hcd.getNameAsString();
        throw new IOException(message);
      }
    }
  }

  private static void checkBloomFilterType(ColumnFamilyDescriptor cfd) throws IOException {
    Configuration conf = new CompoundConfiguration().addStringMap(cfd.getConfiguration());
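    // getBloomFilterParam validates the bloom settings for the configured type; for example,
    // a ROWPREFIX_FIXED_LENGTH bloom with a missing or malformed prefix length is rejected
    // with an IllegalArgumentException.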
    try {
      BloomFilterUtil.getBloomFilterParam(cfd.getBloomFilterType(), conf);
    } catch (IllegalArgumentException e) {
      throw new DoNotRetryIOException("Failed to get bloom filter param", e);
    }
  }

  private static void checkCompression(final TableDescriptor td) throws IOException {
    for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
      CompressionTest.testCompression(cfd.getCompressionType());
      CompressionTest.testCompression(cfd.getCompactionCompressionType());
    }
  }

  private static void checkEncryption(final Configuration conf, final TableDescriptor td)
      throws IOException {
    for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
      EncryptionTest.testEncryption(conf, cfd.getEncryptionType(), cfd.getEncryptionKey());
    }
  }

  private static void checkClassLoading(final Configuration conf, final TableDescriptor td)
      throws IOException {
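    // Resolution is the check here: looking up the split policy class and validating the
    // table's coprocessor attributes fails fast if any configured class cannot be loaded.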
    RegionSplitPolicy.getSplitPolicyClass(td, conf);
    RegionCoprocessorHost.testTableCoprocessorAttrs(conf, td);
  }

  // HBASE-13350 - Helper method to log a warning on sanity check failures when checks are
  // disabled.
  private static void warnOrThrowExceptionForFailure(boolean logWarn, String message,
      Exception cause) throws IOException {
    if (!logWarn) {
      throw new DoNotRetryIOException(message + " Set " + TABLE_SANITY_CHECKS +
          " to false in the configuration or the table descriptor if you want to bypass " +
          "sanity checks", cause);
    }
    LOG.warn(message);
  }
}