Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

metrics: Remove LabelSet #527

Merged
merged 7 commits into the base branch from the contributor's branch on
Mar 28, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 9 additions & 9 deletions docs/examples/basic_meter/basic_metrics.py
Original file line number | Diff line number | Diff line change
Expand Up @@ -84,21 +84,21 @@ def usage(argv):
label_keys=("environment",),
)

# Labelsets are used to identify key-values that are associated with a specific
# Labels are used to identify key-values that are associated with a specific
# metric that you want to record. These are useful for pre-aggregation and can
# be used to store custom dimensions pertaining to a metric
staging_label_set = meter.get_label_set({"environment": "staging"})
testing_label_set = meter.get_label_set({"environment": "testing"})
staging_labels = {"environment": "staging"}
testing_labels = {"environment": "testing"}

# Update the metric instruments using the direct calling convention
requests_counter.add(25, staging_label_set)
requests_size.record(100, staging_label_set)
requests_counter.add(25, staging_labels)
requests_size.record(100, staging_labels)
time.sleep(5)

requests_counter.add(50, staging_label_set)
requests_size.record(5000, staging_label_set)
requests_counter.add(50, staging_labels)
requests_size.record(5000, staging_labels)
time.sleep(5)

requests_counter.add(35, testing_label_set)
requests_size.record(2, testing_label_set)
requests_counter.add(35, testing_labels)
requests_size.record(2, testing_labels)
time.sleep(5)
18 changes: 9 additions & 9 deletions docs/examples/basic_meter/calling_conventions.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,27 +56,27 @@
label_keys=("environment",),
)

label_set = meter.get_label_set({"environment": "staging"})
labels = {"environment": "staging"}

print("Updating using direct calling convention...")
# You can record metrics directly using the metric instrument. You pass in a
# labelset that you would like to record for.
requests_counter.add(25, label_set)
# You can record metrics directly using the metric instrument. You pass in
# labels that you would like to record for.
requests_counter.add(25, labels)
time.sleep(5)

print("Updating using a bound instrument...")
# You can record metrics with bound metric instruments. Bound metric
# instruments are created by passing in a labelset. A bound metric instrument
# instruments are created by passing in labels. A bound metric instrument
# is essentially metric data that corresponds to a specific set of labels.
mauriciovasquezbernal marked this conversation as resolved.
Show resolved Hide resolved
# Therefore, getting a bound metric instrument using the same set of labels
# will yield the same bound metric instrument.
bound_requests_counter = requests_counter.bind(label_set)
bound_requests_counter = requests_counter.bind(labels)
bound_requests_counter.add(100)
time.sleep(5)

print("Updating using batch calling convention...")
# You can record metrics in a batch by passing in a labelset and a sequence of
# You can record metrics in a batch by passing in labels and a sequence of
# (metric, value) pairs. The value would be recorded for each metric using the
# specified labelset for each.
meter.record_batch(label_set, ((requests_counter, 50), (clicks_counter, 70)))
# specified labels for each.
meter.record_batch(labels, ((requests_counter, 50), (clicks_counter, 70)))
time.sleep(5)
8 changes: 4 additions & 4 deletions docs/examples/basic_meter/observer.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@
import psutil

from opentelemetry import metrics
from opentelemetry.sdk.metrics import LabelSet, MeterProvider
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import ConsoleMetricsExporter
from opentelemetry.sdk.metrics.export.batcher import UngroupedBatcher
from opentelemetry.sdk.metrics.export.controller import PushController
Expand All @@ -35,8 +35,8 @@
# Callback to gather cpu usage
def get_cpu_usage_callback(observer):
for (number, percent) in enumerate(psutil.cpu_percent(percpu=True)):
label_set = meter.get_label_set({"cpu_number": str(number)})
observer.observe(percent, label_set)
labels = {"cpu_number": str(number)}
observer.observe(percent, labels)


meter.register_observer(
Expand All @@ -52,7 +52,7 @@ def get_cpu_usage_callback(observer):
# Callback to gather RAM memory usage
def get_ram_usage_callback(observer):
ram_percent = psutil.virtual_memory().percent
observer.observe(ram_percent, LabelSet())
observer.observe(ram_percent, {})


meter.register_observer(
Expand Down
4 changes: 2 additions & 2 deletions docs/examples/otcollector-metrics/collector.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,8 +40,8 @@
label_keys=("environment",),
)

staging_label_set = meter.get_label_set({"environment": "staging"})
requests_counter.add(25, staging_label_set)
staging_labels = {"environment": "staging"}
requests_counter.add(25, staging_labels)

print("Metrics are available now at http://localhost:9090/graph")
input("Press any key to exit...")
4 changes: 2 additions & 2 deletions docs/examples/prometheus/prometheus.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,8 +41,8 @@
label_keys=("environment",),
)

staging_label_set = meter.get_label_set({"environment": "staging"})
requests_counter.add(25, staging_label_set)
staging_labels = {"environment": "staging"}
requests_counter.add(25, staging_labels)

print("Metrics are available now at http://localhost:8000/")
input("Press any key to exit...")
24 changes: 11 additions & 13 deletions docs/getting-started.rst
Original file line number Diff line number Diff line change
Expand Up @@ -249,7 +249,7 @@ The following is an example of emitting metrics to console, in a similar fashion
exporter = ConsoleMetricsExporter()
controller = PushController(meter, exporter, 5)

staging_label_set = meter.get_label_set({"environment": "staging"})
staging_labels = {"environment": "staging"}

requests_counter = meter.create_metric(
name="requests",
Expand All @@ -260,10 +260,10 @@ The following is an example of emitting metrics to console, in a similar fashion
label_keys=("environment",),
)

requests_counter.add(25, staging_label_set)
requests_counter.add(25, staging_labels)
time.sleep(5)

requests_counter.add(20, staging_label_set)
requests_counter.add(20, staging_labels)
time.sleep(5)


Expand All @@ -272,8 +272,8 @@ The sleeps will cause the script to take a while, but running it should yield:
.. code-block:: sh

$ python metrics.py
ConsoleMetricsExporter(data="Counter(name="requests", description="number of requests")", label_set="(('environment', 'staging'),)", value=25)
ConsoleMetricsExporter(data="Counter(name="requests", description="number of requests")", label_set="(('environment', 'staging'),)", value=45)
ConsoleMetricsExporter(data="Counter(name="requests", description="number of requests")", labels="(('environment', 'staging'),)", value=25)
ConsoleMetricsExporter(data="Counter(name="requests", description="number of requests")", labels="(('environment', 'staging'),)", value=45)

Using Prometheus
----------------
Expand Down Expand Up @@ -331,7 +331,7 @@ And use that instead of the `ConsoleMetricsExporter`:
exporter = PrometheusMetricsExporter("MyAppPrefix")
controller = PushController(meter, exporter, 5)

staging_label_set = meter.get_label_set({"environment": "staging"})
staging_labels = {"environment": "staging"}

requests_counter = meter.create_metric(
name="requests",
Expand All @@ -342,10 +342,10 @@ And use that instead of the `ConsoleMetricsExporter`:
label_keys=("environment",),
)

requests_counter.add(25, staging_label_set)
requests_counter.add(25, staging_labels)
time.sleep(5)

requests_counter.add(20, staging_label_set)
requests_counter.add(20, staging_labels)
time.sleep(5)

# This line is added to keep the HTTP server up long enough to scrape.
Expand Down Expand Up @@ -463,11 +463,9 @@ And execute the following script:
metric_type=Counter,
label_keys=("environment",),
)

# Labelsets are used to identify key-values that are associated with a specific
# Labels are used to identify key-values that are associated with a specific
# metric that you want to record. These are useful for pre-aggregation and can
# be used to store custom dimensions pertaining to a metric
label_set = meter.get_label_set({"environment": "staging"})

requests_counter.add(25, label_set)
labels = {"environment": "staging"}
requests_counter.add(25, labels)
time.sleep(10) # give push_controller time to push metrics
4 changes: 2 additions & 2 deletions docs/metrics_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
label_keys=("environment",),
)

staging_label_set = meter.get_label_set({"environment": "staging"})
requests_counter.add(25, staging_label_set)
staging_labels = {"environment": "staging"}
requests_counter.add(25, staging_labels)

input("Press a key to finish...\n")
Original file line number Diff line number Diff line change
Expand Up @@ -77,10 +77,10 @@
Counter,
("environment",),
)
# Labelsets are used to identify key-values that are associated with a specific
# Labels are used to identify key-values that are associated with a specific
# metric that you want to record. These are useful for pre-aggregation and can
# be used to store custom dimensions pertaining to a metric
label_set = meter.get_label_set({"environment": "staging"})
labels = {"environment": "staging"}

counter.add(25, label_set)
counter.add(25, labels)
"""
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,7 @@ def translate_to_collector(

label_values = []
label_keys = []
for label_tuple in metric_record.label_set.labels:
for label_tuple in metric_record.labels:
label_keys.append(metrics_pb2.LabelKey(key=label_tuple[0]))
label_values.append(
metrics_pb2.LabelValue(
Expand Down Expand Up @@ -145,11 +145,12 @@ def get_collector_metric_type(metric: Metric) -> metrics_pb2.MetricDescriptor:


def get_collector_point(metric_record: MetricRecord) -> metrics_pb2.Point:
# TODO: horrible hack to get original list of keys to then get the bound
mauriciovasquezbernal marked this conversation as resolved.
Show resolved Hide resolved
# instrument
key = dict(metric_record.labels)
point = metrics_pb2.Point(
timestamp=utils.proto_timestamp_from_time_ns(
metric_record.metric.bind(
metric_record.label_set
).last_update_timestamp
metric_record.metric.bind(key).last_update_timestamp
)
)
if metric_record.metric.value_type == int:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,12 @@

from opentelemetry import metrics
from opentelemetry.ext.otcollector import metrics_exporter
from opentelemetry.sdk.metrics import Counter, Measure, MeterProvider
from opentelemetry.sdk.metrics import (
Counter,
Measure,
MeterProvider,
get_labels_as_key,
)
from opentelemetry.sdk.metrics.export import (
MetricRecord,
MetricsExportResult,
Expand All @@ -36,8 +41,8 @@ def setUpClass(cls):
# pylint: disable=protected-access
metrics.set_meter_provider(MeterProvider())
cls._meter = metrics.get_meter(__name__)
kvp = {"environment": "staging"}
cls._test_label_set = cls._meter.get_label_set(kvp)
cls._labels = {"environment": "staging"}
cls._key_labels = get_labels_as_key(cls._labels)

def test_constructor(self):
mock_get_node = mock.Mock()
Expand Down Expand Up @@ -77,7 +82,6 @@ def test_get_collector_metric_type(self):

def test_get_collector_point(self):
aggregator = aggregate.CounterAggregator()
label_set = self._meter.get_label_set({"environment": "staging"})
int_counter = self._meter.create_metric(
"testName", "testDescription", "unit", int, Counter
)
Expand All @@ -88,21 +92,21 @@ def test_get_collector_point(self):
"testName", "testDescription", "unit", float, Measure
)
result = metrics_exporter.get_collector_point(
MetricRecord(aggregator, label_set, int_counter)
MetricRecord(aggregator, self._key_labels, int_counter)
)
self.assertIsInstance(result, metrics_pb2.Point)
self.assertIsInstance(result.timestamp, Timestamp)
self.assertEqual(result.int64_value, 0)
aggregator.update(123.5)
aggregator.take_checkpoint()
result = metrics_exporter.get_collector_point(
MetricRecord(aggregator, label_set, float_counter)
MetricRecord(aggregator, self._key_labels, float_counter)
)
self.assertEqual(result.double_value, 123.5)
self.assertRaises(
TypeError,
metrics_exporter.get_collector_point(
MetricRecord(aggregator, label_set, measure)
MetricRecord(aggregator, self._key_labels, measure)
),
)

Expand All @@ -118,7 +122,7 @@ def test_export(self):
"testname", "testdesc", "unit", int, Counter, ["environment"]
)
record = MetricRecord(
aggregate.CounterAggregator(), self._test_label_set, test_metric
aggregate.CounterAggregator(), self._key_labels, test_metric
)

result = collector_exporter.export([record])
Expand All @@ -137,14 +141,13 @@ def test_export(self):
)

def test_translate_to_collector(self):

test_metric = self._meter.create_metric(
"testname", "testdesc", "unit", int, Counter, ["environment"]
)
aggregator = aggregate.CounterAggregator()
aggregator.update(123)
aggregator.take_checkpoint()
record = MetricRecord(aggregator, self._test_label_set, test_metric)
record = MetricRecord(aggregator, self._key_labels, test_metric)
output_metrics = metrics_exporter.translate_to_collector([record])
self.assertEqual(len(output_metrics), 1)
self.assertIsInstance(output_metrics[0], metrics_pb2.Metric)
Expand Down Expand Up @@ -175,12 +178,12 @@ def test_translate_to_collector(self):
self.assertEqual(len(output_metrics[0].timeseries[0].points), 1)
self.assertEqual(
output_metrics[0].timeseries[0].points[0].timestamp.seconds,
record.metric.bind(record.label_set).last_update_timestamp
record.metric.bind(self._labels).last_update_timestamp
// 1000000000,
)
self.assertEqual(
output_metrics[0].timeseries[0].points[0].timestamp.nanos,
record.metric.bind(record.label_set).last_update_timestamp
record.metric.bind(self._labels).last_update_timestamp
% 1000000000,
)
self.assertEqual(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -54,12 +54,12 @@
("environment",),
)

# Labelsets are used to identify key-values that are associated with a specific
# Labels are used to identify key-values that are associated with a specific
# metric that you want to record. These are useful for pre-aggregation and can
# be used to store custom dimensions pertaining to a metric
label_set = meter.get_label_set({"environment": "staging"})
labels = {"environment": "staging"}

counter.add(25, label_set)
counter.add(25, labels)
input("Press any key to exit...")

API
Expand Down Expand Up @@ -145,7 +145,7 @@ def _translate_to_prometheus(self, metric_record: MetricRecord):
prometheus_metric = None
label_values = []
label_keys = []
for label_tuple in metric_record.label_set.labels:
for label_tuple in metric_record.labels:
label_keys.append(self._sanitize(label_tuple[0]))
label_values.append(label_tuple[1])

Expand Down
Loading