|
14 | 14 | #include "rocksdb/slice_transform.h"
|
15 | 15 | #include "rocksdb/filter_policy.h"
|
16 | 16 | #include "base/Configuration.h"
|
| 17 | +#include "rocksdb/concurrent_task_limiter.h" |
| 18 | +#include "rocksdb/rate_limiter.h" |
17 | 19 |
|
18 | 20 | // [WAL]
|
19 | 21 | DEFINE_bool(rocksdb_disable_wal,
|
@@ -63,6 +65,12 @@ DEFINE_string(rocksdb_compression_per_level, "", "Specify per level compression
|
// Toggles collection of RocksDB's internal statistics; disabled by default
// since gathering them adds overhead.
DEFINE_bool(enable_rocksdb_statistics, false, "Whether or not to enable rocksdb's statistics");
|
// Granularity of statistics gathered when FLAGS_enable_rocksdb_statistics is
// on. The default skips histograms/timers, the cheapest non-trivial level.
// NOTE(review): the string is presumably parsed into rocksdb::StatsLevel
// elsewhere -- valid values depend on that parsing code; confirm there.
DEFINE_string(rocksdb_stats_level, "kExceptHistogramOrTimers", "rocksdb statistics level");
|
65 | 67 |
|
// Upper bound on concurrently running compaction tasks. When > 0,
// initRocksdbOptions installs a rocksdb::ConcurrentTaskLimiter with this
// limit; 0 leaves compaction concurrency unrestricted.
DEFINE_int32(num_compaction_threads, 0,
             "Number of total compaction threads. 0 means unlimited.");
| 70 | + |
| 71 | +DEFINE_int32(rate_limit, 0, |
| 72 | + "write limit in bytes per sec. The unit is MB. 0 means unlimited."); |
| 73 | + |
66 | 74 | namespace nebula {
|
67 | 75 | namespace kvstore {
|
68 | 76 |
|
@@ -165,6 +173,16 @@ rocksdb::Status initRocksdbOptions(rocksdb::Options &baseOpts) {
|
165 | 173 | = rocksdb::NewLRUCache(FLAGS_rocksdb_block_cache * 1024 * 1024, 8/*shard bits*/);
|
166 | 174 | bbtOpts.block_cache = blockCache;
|
167 | 175 | }
|
| 176 | + if (FLAGS_num_compaction_threads > 0) { |
| 177 | + static std::shared_ptr<rocksdb::ConcurrentTaskLimiter> compaction_thread_limiter{ |
| 178 | + rocksdb::NewConcurrentTaskLimiter("compaction", FLAGS_num_compaction_threads)}; |
| 179 | + baseOpts.compaction_thread_limiter = compaction_thread_limiter; |
| 180 | + } |
| 181 | + if (FLAGS_rate_limit > 0) { |
| 182 | + static std::shared_ptr<rocksdb::RateLimiter> rate_limiter{ |
| 183 | + rocksdb::NewGenericRateLimiter(FLAGS_rate_limit * 1024 * 1024)}; |
| 184 | + baseOpts.rate_limiter = rate_limiter; |
| 185 | + } |
168 | 186 |
|
169 | 187 | bbtOpts.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, false));
|
170 | 188 | if (FLAGS_enable_partitioned_index_filter) {
|
|
0 commit comments