
Commit 8363681

ankitsinghal authored and saintstack committed
HBASE-23054 Remove synchronization block from MetaTableMetrics and fix LossyCounting algorithm
1 parent ef79b40 commit 8363681

4 files changed: +108 −129 lines changed


hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java

Lines changed: 46 additions & 91 deletions
@@ -1,23 +1,31 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
- * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
- * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
- * for the specific language governing permissions and limitations under the License.
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
 
 package org.apache.hadoop.hbase.coprocessor;
 
 import java.io.IOException;
 import java.nio.charset.StandardCharsets;
+import java.util.HashSet;
 import java.util.List;
-import java.util.Map;
 import java.util.Optional;
 import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
+
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.TableName;
@@ -27,12 +35,11 @@
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Row;
 import org.apache.hadoop.hbase.ipc.RpcServer;
-import org.apache.hadoop.hbase.metrics.Meter;
-import org.apache.hadoop.hbase.metrics.Metric;
 import org.apache.hadoop.hbase.metrics.MetricRegistry;
 import org.apache.hadoop.hbase.util.LossyCounting;
 import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.yetus.audience.InterfaceAudience;
+
 import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;
 
 
@@ -49,10 +56,10 @@
 public class MetaTableMetrics implements RegionCoprocessor {
 
   private ExampleRegionObserverMeta observer;
-  private Map<String, Optional<Metric>> requestsMap;
   private MetricRegistry registry;
   private LossyCounting clientMetricsLossyCounting, regionMetricsLossyCounting;
   private boolean active = false;
+  private Set<String> metrics = new HashSet<String>();
 
   enum MetaTableOps {
     GET, PUT, DELETE;
@@ -101,66 +108,6 @@ private void registerAndMarkMetrics(ObserverContext<RegionCoprocessorEnvironment
     opWithClientMetricRegisterAndMark(row);
   }
 
-  private void markMeterIfPresent(String requestMeter) {
-    if (requestMeter.isEmpty()) {
-      return;
-    }
-
-    Optional<Metric> optionalMetric = requestsMap.get(requestMeter);
-    if (optionalMetric != null && optionalMetric.isPresent()) {
-      Meter metric = (Meter) optionalMetric.get();
-      metric.mark();
-    }
-  }
-
-  private void registerMeterIfNotPresent(String requestMeter) {
-    if (requestMeter.isEmpty()) {
-      return;
-    }
-    if (!requestsMap.containsKey(requestMeter)) {
-      registry.meter(requestMeter);
-      requestsMap.put(requestMeter, registry.get(requestMeter));
-    }
-  }
-
-  /**
-   * Registers and counts lossyCount for Meters that kept by lossy counting.
-   * By using lossy count to maintain meters, at most 7 / e meters will be kept (e is error rate)
-   * e.g. when e is 0.02 by default, at most 350 Clients request metrics will be kept
-   * also, all kept elements have frequency higher than e * N. (N is total count)
-   * @param requestMeter meter to be registered
-   * @param lossyCounting lossyCounting object for one type of meters.
-   */
-  private void registerLossyCountingMeterIfNotPresent(String requestMeter,
-      LossyCounting lossyCounting) {
-
-    if (requestMeter.isEmpty()) {
-      return;
-    }
-    synchronized (lossyCounting) {
-      Set<String> metersToBeRemoved = lossyCounting.addByOne(requestMeter);
-
-      boolean isNewMeter = !requestsMap.containsKey(requestMeter);
-      boolean requestMeterRemoved = metersToBeRemoved.contains(requestMeter);
-      if (isNewMeter) {
-        if (requestMeterRemoved) {
-          // if the new metric is swept off by lossyCounting then don't add in the map
-          metersToBeRemoved.remove(requestMeter);
-        } else {
-          // else register the new metric and add in the map
-          registry.meter(requestMeter);
-          requestsMap.put(requestMeter, registry.get(requestMeter));
-        }
-      }
-
-      for (String meter : metersToBeRemoved) {
-        //cleanup requestsMap according to the swept data from lossy count;
-        requestsMap.remove(meter);
-        registry.remove(meter);
-      }
-    }
-  }
-
   /**
    * Get table name from Ops such as: get, put, delete.
    * @param op such as get, put or delete.
@@ -196,13 +143,13 @@ private boolean isMetaTableOp(ObserverContext<RegionCoprocessorEnvironment> e) {
 
   private void clientMetricRegisterAndMark() {
     // Mark client metric
-    String clientIP = RpcServer.getRemoteIp() != null ? RpcServer.getRemoteIp().toString() : "";
+    String clientIP = RpcServer.getRemoteIp() != null ? RpcServer.getRemoteIp().toString() : null;
     if (clientIP == null || clientIP.isEmpty()) {
       return;
     }
     String clientRequestMeter = clientRequestMeterName(clientIP);
-    registerLossyCountingMeterIfNotPresent(clientRequestMeter, clientMetricsLossyCounting);
-    markMeterIfPresent(clientRequestMeter);
+    clientMetricsLossyCounting.add(clientRequestMeter);
+    registerAndMarkMeter(clientRequestMeter);
   }
 
   private void tableMetricRegisterAndMark(Row op) {
@@ -212,7 +159,7 @@ private void tableMetricRegisterAndMark(Row op) {
       return;
     }
     String tableRequestMeter = tableMeterName(tableName);
-    registerAndMarkMeterIfNotPresent(tableRequestMeter);
+    registerAndMarkMeter(tableRequestMeter);
   }
 
   private void regionMetricRegisterAndMark(Row op) {
@@ -222,8 +169,8 @@ private void regionMetricRegisterAndMark(Row op) {
       return;
     }
     String regionRequestMeter = regionMeterName(regionId);
-    registerLossyCountingMeterIfNotPresent(regionRequestMeter, regionMetricsLossyCounting);
-    markMeterIfPresent(regionRequestMeter);
+    regionMetricsLossyCounting.add(regionRequestMeter);
+    registerAndMarkMeter(regionRequestMeter);
   }
 
   private void opMetricRegisterAndMark(Row op) {
@@ -232,7 +179,7 @@ private void opMetricRegisterAndMark(Row op) {
     if (opMeterName == null || opMeterName.isEmpty()) {
       return;
     }
-    registerAndMarkMeterIfNotPresent(opMeterName);
+    registerAndMarkMeter(opMeterName);
   }
 
   private void opWithClientMetricRegisterAndMark(Object op) {
@@ -241,13 +188,18 @@ private void opWithClientMetricRegisterAndMark(Object op) {
     if (opWithClientMeterName == null || opWithClientMeterName.isEmpty()) {
       return;
     }
-    registerAndMarkMeterIfNotPresent(opWithClientMeterName);
+    registerAndMarkMeter(opWithClientMeterName);
   }
 
   // Helper function to register and mark meter if not present
-  private void registerAndMarkMeterIfNotPresent(String name) {
-    registerMeterIfNotPresent(name);
-    markMeterIfPresent(name);
+  private void registerAndMarkMeter(String requestMeter) {
+    if (requestMeter.isEmpty()) {
+      return;
+    }
+    if(!registry.get(requestMeter).isPresent()){
+      metrics.add(requestMeter);
+    }
+    registry.meter(requestMeter).mark();
   }
 
   private String opWithClientMeterName(Object op) {
@@ -327,9 +279,14 @@ public void start(CoprocessorEnvironment env) throws IOException {
         .equals(TableName.META_TABLE_NAME)) {
       RegionCoprocessorEnvironment regionCoprocessorEnv = (RegionCoprocessorEnvironment) env;
       registry = regionCoprocessorEnv.getMetricRegistryForRegionServer();
-      requestsMap = new ConcurrentHashMap<>();
-      clientMetricsLossyCounting = new LossyCounting("clientMetaMetrics");
-      regionMetricsLossyCounting = new LossyCounting("regionMetaMetrics");
+      LossyCounting.LossyCountingListener listener = new LossyCounting.LossyCountingListener(){
+        @Override public void sweep(String key) {
+          registry.remove(key);
+          metrics.remove(key);
+        }
+      };
+      clientMetricsLossyCounting = new LossyCounting("clientMetaMetrics",listener);
+      regionMetricsLossyCounting = new LossyCounting("regionMetaMetrics",listener);
       // only be active mode when this region holds meta table.
       active = true;
     }
@@ -338,10 +295,8 @@ public void start(CoprocessorEnvironment env) throws IOException {
   @Override
   public void stop(CoprocessorEnvironment env) throws IOException {
     // since meta region can move around, clear stale metrics when stop.
-    if (requestsMap != null) {
-      for (String meterName : requestsMap.keySet()) {
-        registry.remove(meterName);
-      }
+    for(String metric:metrics){
+      registry.remove(metric);
     }
   }
 }
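The hot path above no longer needs a synchronized block because registering and marking collapse into a single idempotent registry call. Below is a minimal sketch of that pattern, assuming only the MetricRegistry API used in this diff; the helper class and its name are hypothetical, introduced purely for illustration.

import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.hadoop.hbase.metrics.Metric;
import org.apache.hadoop.hbase.metrics.MetricRegistry;

// Hypothetical stand-alone helper mirroring registerAndMarkMeter() above.
// registry.meter(name) creates the meter on first use and returns the existing
// one afterwards, so one call both registers and marks -- no shared map, no lock.
final class MeterHelper {
  private final MetricRegistry registry;
  // names this helper created, so a stop() hook can remove them later
  private final Set<String> metrics = ConcurrentHashMap.newKeySet();

  MeterHelper(MetricRegistry registry) {
    this.registry = registry;
  }

  void registerAndMark(String requestMeter) {
    if (requestMeter.isEmpty()) {
      return;
    }
    Optional<Metric> existing = registry.get(requestMeter);
    if (!existing.isPresent()) {
      metrics.add(requestMeter);          // first time we see this meter name
    }
    registry.meter(requestMeter).mark();  // get-or-create, then count the request
  }
}

The coprocessor pairs this with clientMetricsLossyCounting.add(...) / regionMetricsLossyCounting.add(...), so only frequently seen client and region meters stay registered; the sweep listener installed in start() removes the rest from the registry and from the bookkeeping set.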

hbase-server/src/main/java/org/apache/hadoop/hbase/util/LossyCounting.java

Lines changed: 35 additions & 20 deletions
@@ -19,7 +19,6 @@
 
 package org.apache.hadoop.hbase.util;
 
-import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
@@ -47,13 +46,18 @@
 public class LossyCounting {
   private static final Logger LOG = LoggerFactory.getLogger(LossyCounting.class);
   private long bucketSize;
-  private long currentTerm;
+  private int currentTerm;
   private double errorRate;
   private Map<String, Integer> data;
   private long totalDataCount;
   private String name;
+  private LossyCountingListener listener;
 
-  public LossyCounting(double errorRate, String name) {
+  public interface LossyCountingListener {
+    void sweep(String key);
+  }
+
+  public LossyCounting(double errorRate, String name, LossyCountingListener listener) {
     this.errorRate = errorRate;
     this.name = name;
     if (errorRate < 0.0 || errorRate > 1.0) {
@@ -63,48 +67,55 @@ public LossyCounting(double errorRate, String name) {
     this.currentTerm = 1;
     this.totalDataCount = 0;
     this.data = new ConcurrentHashMap<>();
+    this.listener = listener;
     calculateCurrentTerm();
   }
 
-  public LossyCounting(String name) {
+  public LossyCounting(String name, LossyCountingListener listener) {
     this(HBaseConfiguration.create().getDouble(HConstants.DEFAULT_LOSSY_COUNTING_ERROR_RATE, 0.02),
-        name);
+        name, listener);
   }
 
-  public Set<String> addByOne(String key) {
-    data.put(key, data.getOrDefault(key, 0) + 1);
+  private void addByOne(String key) {
+    //If entry exists, we update the entry by incrementing its frequency by one. Otherwise,
+    //we create a new entry starting with currentTerm so that it will not be pruned immediately
+    data.put(key, data.getOrDefault(key, currentTerm != 0 ? currentTerm - 1 : 0) + 1);
+
+    //update totalDataCount and term
     totalDataCount++;
     calculateCurrentTerm();
-    Set<String> dataToBeSwept = new HashSet<>();
+  }
+
+  public void add(String key) {
+    addByOne(key);
     if(totalDataCount % bucketSize == 0) {
-      dataToBeSwept = sweep();
+      //sweep the entries at bucket boundaries
+      sweep();
     }
-    return dataToBeSwept;
   }
 
+
   /**
    * sweep low frequency data
    * @return Names of elements got swept
    */
-  private Set<String> sweep() {
-    Set<String> dataToBeSwept = new HashSet<>();
+  private void sweep() {
     for(Map.Entry<String, Integer> entry : data.entrySet()) {
-      if(entry.getValue() + errorRate < currentTerm) {
-        dataToBeSwept.add(entry.getKey());
+      if(entry.getValue() < currentTerm) {
+        String metric = entry.getKey();
+        data.remove(metric);
+        if (listener != null) {
+          listener.sweep(metric);
+        }
       }
     }
-    for(String key : dataToBeSwept) {
-      data.remove(key);
-    }
-    LOG.trace(String.format("%s swept %d elements.", name, dataToBeSwept.size()));
-    return dataToBeSwept;
   }
 
   /**
   * Calculate and set current term
   */
   private void calculateCurrentTerm() {
-    this.currentTerm = (int) Math.ceil(1.0 * totalDataCount / bucketSize);
+    this.currentTerm = (int) Math.ceil(1.0 * totalDataCount / (double) bucketSize);
   }
 
   public long getBucketSize(){
@@ -119,6 +130,10 @@ public boolean contains(String key) {
     return data.containsKey(key);
   }
 
+  public Set<String> getElements(){
+    return data.keySet();
+  }
+
   public long getCurrentTerm() {
     return currentTerm;
   }
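With add() now handling the sweep internally and notifying a listener, callers no longer have to reconcile a returned sweep set. A minimal usage sketch follows, assuming only the constructor, add(), contains(), getElements() and LossyCountingListener shown in this diff; the class name and counts below are illustrative, and the lambda assumes a Java 8+ build.

import org.apache.hadoop.hbase.util.LossyCounting;

public class LossyCountingExample {
  public static void main(String[] args) {
    // Sweep callback: invoked for each key whose frequency fell below the current term.
    LossyCounting.LossyCountingListener listener = key ->
        System.out.println("swept: " + key);

    // 2% error rate; sweeps run every bucketSize additions (see getBucketSize()).
    LossyCounting counting = new LossyCounting(0.02, "exampleMetrics", listener);

    for (int i = 0; i < 100; i++) {
      counting.add("hotClient");      // a key seen on every request survives the sweeps
      counting.add("client-" + i);    // one-off keys get pruned at later bucket boundaries
    }

    System.out.println("hotClient kept: " + counting.contains("hotClient"));
    System.out.println("elements kept:  " + counting.getElements().size());
  }
}

The design change is that pruning is driven from inside add() and pushed out through sweep(key), which is what lets MetaTableMetrics above drop both its requestsMap and the synchronized block.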

hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMetaTableMetrics.java

Lines changed: 16 additions & 8 deletions
@@ -1,12 +1,20 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
- * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
- * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
- * for the specific language governing permissions and limitations under the License.
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
 
 package org.apache.hadoop.hbase.coprocessor;
