From 0bbc5dd1552f8459c878114b8ae590ddcbbf661d Mon Sep 17 00:00:00 2001 From: Povilas Versockas Date: Thu, 20 Jul 2023 22:01:26 +0300 Subject: [PATCH] [receiver/k8scluster] Change k8s.daemonset metrics to use mdatagen (#24269) **Description:** Change k8s.daemonset metrics to use mdatagen. The migration has the following side effects, we add scope and schema to metrics: ``` schemaUrl: "https://opentelemetry.io/schemas/1.18.0" ... scope: name: otelcol/k8sclusterreceiver version: latest ``` **Link to tracking Issue:** https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/4367 --------- Co-authored-by: Curtis Robert <92119472+crobert-1@users.noreply.github.com> --- .../k8s-cluster-receiver-daemonset-2.yaml | 21 + .../internal/collection/collector.go | 2 +- .../internal/demonset/daemonsets.go | 95 +---- .../internal/demonset/daemonsets_test.go | 40 +- .../internal/demonset/doc.go | 6 + .../internal/demonset/documentation.md | 56 +++ .../internal/metadata/generated_config.go | 92 +++++ .../metadata/generated_config_test.go | 78 ++++ .../internal/metadata/generated_metrics.go | 386 ++++++++++++++++++ .../metadata/generated_metrics_test.go | 178 ++++++++ .../internal/metadata/testdata/config.yaml | 39 ++ .../internal/demonset/metadata.yaml | 52 +++ .../internal/demonset/testdata/expected.yaml | 45 ++ .../testdata/e2e/expected.yaml | 10 +- 14 files changed, 995 insertions(+), 105 deletions(-) create mode 100755 .chloggen/k8s-cluster-receiver-daemonset-2.yaml create mode 100644 receiver/k8sclusterreceiver/internal/demonset/doc.go create mode 100644 receiver/k8sclusterreceiver/internal/demonset/documentation.md create mode 100644 receiver/k8sclusterreceiver/internal/demonset/internal/metadata/generated_config.go create mode 100644 receiver/k8sclusterreceiver/internal/demonset/internal/metadata/generated_config_test.go create mode 100644 receiver/k8sclusterreceiver/internal/demonset/internal/metadata/generated_metrics.go create mode 100644 
receiver/k8sclusterreceiver/internal/demonset/internal/metadata/generated_metrics_test.go create mode 100644 receiver/k8sclusterreceiver/internal/demonset/internal/metadata/testdata/config.yaml create mode 100644 receiver/k8sclusterreceiver/internal/demonset/metadata.yaml create mode 100644 receiver/k8sclusterreceiver/internal/demonset/testdata/expected.yaml diff --git a/.chloggen/k8s-cluster-receiver-daemonset-2.yaml b/.chloggen/k8s-cluster-receiver-daemonset-2.yaml new file mode 100755 index 000000000000..3e6d5c106e00 --- /dev/null +++ b/.chloggen/k8s-cluster-receiver-daemonset-2.yaml @@ -0,0 +1,21 @@ +# Use this changelog template to create an entry for release notes. +# If your change doesn't affect end users, such as a test fix or a tooling change, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: k8sclusterreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Change k8s.daemonset metrics to use mdatagen + + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [4367] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. 
+subtext: diff --git a/receiver/k8sclusterreceiver/internal/collection/collector.go b/receiver/k8sclusterreceiver/internal/collection/collector.go index 008d25bfb73b..430c75a95705 100644 --- a/receiver/k8sclusterreceiver/internal/collection/collector.go +++ b/receiver/k8sclusterreceiver/internal/collection/collector.go @@ -118,7 +118,7 @@ func (dc *DataCollector) SyncMetrics(obj interface{}) { case *appsv1.ReplicaSet: md = replicaset.GetMetrics(dc.settings, o) case *appsv1.DaemonSet: - md = ocsToMetrics(demonset.GetMetrics(o)) + md = demonset.GetMetrics(dc.settings, o) case *appsv1.StatefulSet: md = statefulset.GetMetrics(dc.settings, o) case *batchv1.Job: diff --git a/receiver/k8sclusterreceiver/internal/demonset/daemonsets.go b/receiver/k8sclusterreceiver/internal/demonset/daemonsets.go index 4b5208e8e716..d14b17cb739a 100644 --- a/receiver/k8sclusterreceiver/internal/demonset/daemonsets.go +++ b/receiver/k8sclusterreceiver/internal/demonset/daemonsets.go @@ -4,46 +4,19 @@ package demonset // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/demonset" import ( - agentmetricspb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1" - metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" - resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" - conventions "go.opentelemetry.io/collector/semconv/v1.6.1" + "time" + + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/receiver" appsv1 "k8s.io/api/apps/v1" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/constants" + imetadataphase "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/demonset/internal/metadata" 
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/metadata" - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/utils" ) -var daemonSetCurrentScheduledMetric = &metricspb.MetricDescriptor{ - Name: "k8s.daemonset.current_scheduled_nodes", - Description: "Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod", - Unit: "1", - Type: metricspb.MetricDescriptor_GAUGE_INT64, -} - -var daemonSetDesiredScheduledMetric = &metricspb.MetricDescriptor{ - Name: "k8s.daemonset.desired_scheduled_nodes", - Description: "Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod)", - Unit: "1", - Type: metricspb.MetricDescriptor_GAUGE_INT64, -} - -var daemonSetMisScheduledMetric = &metricspb.MetricDescriptor{ - Name: "k8s.daemonset.misscheduled_nodes", - Description: "Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod", - Unit: "1", - Type: metricspb.MetricDescriptor_GAUGE_INT64, -} - -var daemonSetReadyMetric = &metricspb.MetricDescriptor{ - Name: "k8s.daemonset.ready_nodes", - Description: "Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready", - Unit: "1", - Type: metricspb.MetricDescriptor_GAUGE_INT64, -} - // Transform transforms the pod to remove the fields that we don't use to reduce RAM utilization. // IMPORTANT: Make sure to update this function before using new daemonset fields. 
func Transform(ds *appsv1.DaemonSet) *appsv1.DaemonSet { @@ -58,51 +31,21 @@ func Transform(ds *appsv1.DaemonSet) *appsv1.DaemonSet { } } -func GetMetrics(ds *appsv1.DaemonSet) []*agentmetricspb.ExportMetricsServiceRequest { - metrics := []*metricspb.Metric{ - { - MetricDescriptor: daemonSetCurrentScheduledMetric, - Timeseries: []*metricspb.TimeSeries{ - utils.GetInt64TimeSeries(int64(ds.Status.CurrentNumberScheduled)), - }, - }, - { - MetricDescriptor: daemonSetDesiredScheduledMetric, - Timeseries: []*metricspb.TimeSeries{ - utils.GetInt64TimeSeries(int64(ds.Status.DesiredNumberScheduled)), - }, - }, - { - MetricDescriptor: daemonSetMisScheduledMetric, - Timeseries: []*metricspb.TimeSeries{ - utils.GetInt64TimeSeries(int64(ds.Status.NumberMisscheduled)), - }, - }, - { - MetricDescriptor: daemonSetReadyMetric, - Timeseries: []*metricspb.TimeSeries{ - utils.GetInt64TimeSeries(int64(ds.Status.NumberReady)), - }, - }, - } +func GetMetrics(set receiver.CreateSettings, ds *appsv1.DaemonSet) pmetric.Metrics { + mbphase := imetadataphase.NewMetricsBuilder(imetadataphase.DefaultMetricsBuilderConfig(), set) + ts := pcommon.NewTimestampFromTime(time.Now()) + mbphase.RecordK8sDaemonsetCurrentScheduledNodesDataPoint(ts, int64(ds.Status.CurrentNumberScheduled)) + mbphase.RecordK8sDaemonsetDesiredScheduledNodesDataPoint(ts, int64(ds.Status.DesiredNumberScheduled)) + mbphase.RecordK8sDaemonsetMisscheduledNodesDataPoint(ts, int64(ds.Status.NumberMisscheduled)) + mbphase.RecordK8sDaemonsetReadyNodesDataPoint(ts, int64(ds.Status.NumberReady)) - return []*agentmetricspb.ExportMetricsServiceRequest{ - { - Resource: getResource(ds), - Metrics: metrics, - }, - } -} + return mbphase.Emit( + imetadataphase.WithK8sNamespaceName(ds.Namespace), + imetadataphase.WithK8sDaemonsetName(ds.Name), + imetadataphase.WithK8sDaemonsetUID(string(ds.UID)), + imetadataphase.WithOpencensusResourcetype("k8s"), + ) -func getResource(ds *appsv1.DaemonSet) *resourcepb.Resource { - return &resourcepb.Resource{ 
- Type: constants.K8sType, - Labels: map[string]string{ - conventions.AttributeK8SDaemonSetUID: string(ds.UID), - conventions.AttributeK8SDaemonSetName: ds.Name, - conventions.AttributeK8SNamespaceName: ds.Namespace, - }, - } } func GetMetadata(ds *appsv1.DaemonSet) map[experimentalmetricmetadata.ResourceID]*metadata.KubernetesMetadata { diff --git a/receiver/k8sclusterreceiver/internal/demonset/daemonsets_test.go b/receiver/k8sclusterreceiver/internal/demonset/daemonsets_test.go index 5dd869f9e6b0..db6f8bdf41e5 100644 --- a/receiver/k8sclusterreceiver/internal/demonset/daemonsets_test.go +++ b/receiver/k8sclusterreceiver/internal/demonset/daemonsets_test.go @@ -4,47 +4,35 @@ package demonset import ( + "path/filepath" "testing" - metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/receiver/receivertest" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/constants" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/golden" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/testutils" ) func TestDaemonsetMetrics(t *testing.T) { ds := testutils.NewDaemonset("1") - actualResourceMetrics := GetMetrics(ds) - - require.Equal(t, 1, len(actualResourceMetrics)) - require.Equal(t, 4, len(actualResourceMetrics[0].Metrics)) - - rm := actualResourceMetrics[0] - testutils.AssertResource(t, rm.Resource, constants.K8sType, - map[string]string{ - "k8s.daemonset.uid": "test-daemonset-1-uid", - "k8s.daemonset.name": "test-daemonset-1", - "k8s.namespace.name": "test-namespace", - }, + m := GetMetrics(receivertest.NewNopCreateSettings(), ds) + 
expected, err := golden.ReadMetrics(filepath.Join("testdata", "expected.yaml")) + require.NoError(t, err) + require.NoError(t, pmetrictest.CompareMetrics(expected, m, + pmetrictest.IgnoreTimestamp(), + pmetrictest.IgnoreStartTimestamp(), + pmetrictest.IgnoreResourceMetricsOrder(), + pmetrictest.IgnoreMetricsOrder(), + pmetrictest.IgnoreScopeMetricsOrder(), + ), ) - - testutils.AssertMetricsInt(t, rm.Metrics[0], "k8s.daemonset.current_scheduled_nodes", - metricspb.MetricDescriptor_GAUGE_INT64, 3) - - testutils.AssertMetricsInt(t, rm.Metrics[1], "k8s.daemonset.desired_scheduled_nodes", - metricspb.MetricDescriptor_GAUGE_INT64, 5) - - testutils.AssertMetricsInt(t, rm.Metrics[2], "k8s.daemonset.misscheduled_nodes", - metricspb.MetricDescriptor_GAUGE_INT64, 1) - - testutils.AssertMetricsInt(t, rm.Metrics[3], "k8s.daemonset.ready_nodes", - metricspb.MetricDescriptor_GAUGE_INT64, 2) } func TestTransform(t *testing.T) { diff --git a/receiver/k8sclusterreceiver/internal/demonset/doc.go b/receiver/k8sclusterreceiver/internal/demonset/doc.go new file mode 100644 index 000000000000..391dda75f538 --- /dev/null +++ b/receiver/k8sclusterreceiver/internal/demonset/doc.go @@ -0,0 +1,6 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:generate mdatagen metadata.yaml + +package demonset // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/demonset" diff --git a/receiver/k8sclusterreceiver/internal/demonset/documentation.md b/receiver/k8sclusterreceiver/internal/demonset/documentation.md new file mode 100644 index 000000000000..fc07caa369a4 --- /dev/null +++ b/receiver/k8sclusterreceiver/internal/demonset/documentation.md @@ -0,0 +1,56 @@ +[comment]: <> (Code generated by mdatagen. DO NOT EDIT.) + +# k8s/daemonset + +**Parent Component:** k8s_cluster + +## Default Metrics + +The following metrics are emitted by default. 
Each of them can be disabled by applying the following configuration: + +```yaml +metrics: + : + enabled: false +``` + +### k8s.daemonset.current_scheduled_nodes + +Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### k8s.daemonset.desired_scheduled_nodes + +Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod) + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### k8s.daemonset.misscheduled_nodes + +Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### k8s.daemonset.ready_nodes + +Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +## Resource Attributes + +| Name | Description | Values | Enabled | +| ---- | ----------- | ------ | ------- | +| k8s.daemonset.name | The k8s daemonset name. | Any Str | true | +| k8s.daemonset.uid | The k8s daemonset uid. | Any Str | true | +| k8s.namespace.name | The k8s namespace name. | Any Str | true | +| opencensus.resourcetype | The OpenCensus resource type. | Any Str | true | diff --git a/receiver/k8sclusterreceiver/internal/demonset/internal/metadata/generated_config.go b/receiver/k8sclusterreceiver/internal/demonset/internal/metadata/generated_config.go new file mode 100644 index 000000000000..c72532a98474 --- /dev/null +++ b/receiver/k8sclusterreceiver/internal/demonset/internal/metadata/generated_config.go @@ -0,0 +1,92 @@ +// Code generated by mdatagen. DO NOT EDIT. 
+ +package metadata + +import "go.opentelemetry.io/collector/confmap" + +// MetricConfig provides common config for a particular metric. +type MetricConfig struct { + Enabled bool `mapstructure:"enabled"` + + enabledSetByUser bool +} + +func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error { + if parser == nil { + return nil + } + err := parser.Unmarshal(ms, confmap.WithErrorUnused()) + if err != nil { + return err + } + ms.enabledSetByUser = parser.IsSet("enabled") + return nil +} + +// MetricsConfig provides config for k8s/daemonset metrics. +type MetricsConfig struct { + K8sDaemonsetCurrentScheduledNodes MetricConfig `mapstructure:"k8s.daemonset.current_scheduled_nodes"` + K8sDaemonsetDesiredScheduledNodes MetricConfig `mapstructure:"k8s.daemonset.desired_scheduled_nodes"` + K8sDaemonsetMisscheduledNodes MetricConfig `mapstructure:"k8s.daemonset.misscheduled_nodes"` + K8sDaemonsetReadyNodes MetricConfig `mapstructure:"k8s.daemonset.ready_nodes"` +} + +func DefaultMetricsConfig() MetricsConfig { + return MetricsConfig{ + K8sDaemonsetCurrentScheduledNodes: MetricConfig{ + Enabled: true, + }, + K8sDaemonsetDesiredScheduledNodes: MetricConfig{ + Enabled: true, + }, + K8sDaemonsetMisscheduledNodes: MetricConfig{ + Enabled: true, + }, + K8sDaemonsetReadyNodes: MetricConfig{ + Enabled: true, + }, + } +} + +// ResourceAttributeConfig provides common config for a particular resource attribute. +type ResourceAttributeConfig struct { + Enabled bool `mapstructure:"enabled"` +} + +// ResourceAttributesConfig provides config for k8s/daemonset resource attributes. 
+type ResourceAttributesConfig struct { + K8sDaemonsetName ResourceAttributeConfig `mapstructure:"k8s.daemonset.name"` + K8sDaemonsetUID ResourceAttributeConfig `mapstructure:"k8s.daemonset.uid"` + K8sNamespaceName ResourceAttributeConfig `mapstructure:"k8s.namespace.name"` + OpencensusResourcetype ResourceAttributeConfig `mapstructure:"opencensus.resourcetype"` +} + +func DefaultResourceAttributesConfig() ResourceAttributesConfig { + return ResourceAttributesConfig{ + K8sDaemonsetName: ResourceAttributeConfig{ + Enabled: true, + }, + K8sDaemonsetUID: ResourceAttributeConfig{ + Enabled: true, + }, + K8sNamespaceName: ResourceAttributeConfig{ + Enabled: true, + }, + OpencensusResourcetype: ResourceAttributeConfig{ + Enabled: true, + }, + } +} + +// MetricsBuilderConfig is a configuration for k8s/daemonset metrics builder. +type MetricsBuilderConfig struct { + Metrics MetricsConfig `mapstructure:"metrics"` + ResourceAttributes ResourceAttributesConfig `mapstructure:"resource_attributes"` +} + +func DefaultMetricsBuilderConfig() MetricsBuilderConfig { + return MetricsBuilderConfig{ + Metrics: DefaultMetricsConfig(), + ResourceAttributes: DefaultResourceAttributesConfig(), + } +} diff --git a/receiver/k8sclusterreceiver/internal/demonset/internal/metadata/generated_config_test.go b/receiver/k8sclusterreceiver/internal/demonset/internal/metadata/generated_config_test.go new file mode 100644 index 000000000000..242cea723fa4 --- /dev/null +++ b/receiver/k8sclusterreceiver/internal/demonset/internal/metadata/generated_config_test.go @@ -0,0 +1,78 @@ +// Code generated by mdatagen. DO NOT EDIT. 
+ +package metadata + +import ( + "path/filepath" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap/confmaptest" +) + +func TestMetricsBuilderConfig(t *testing.T) { + tests := []struct { + name string + want MetricsBuilderConfig + }{ + { + name: "default", + want: DefaultMetricsBuilderConfig(), + }, + { + name: "all_set", + want: MetricsBuilderConfig{ + Metrics: MetricsConfig{ + K8sDaemonsetCurrentScheduledNodes: MetricConfig{Enabled: true}, + K8sDaemonsetDesiredScheduledNodes: MetricConfig{Enabled: true}, + K8sDaemonsetMisscheduledNodes: MetricConfig{Enabled: true}, + K8sDaemonsetReadyNodes: MetricConfig{Enabled: true}, + }, + ResourceAttributes: ResourceAttributesConfig{ + K8sDaemonsetName: ResourceAttributeConfig{Enabled: true}, + K8sDaemonsetUID: ResourceAttributeConfig{Enabled: true}, + K8sNamespaceName: ResourceAttributeConfig{Enabled: true}, + OpencensusResourcetype: ResourceAttributeConfig{Enabled: true}, + }, + }, + }, + { + name: "none_set", + want: MetricsBuilderConfig{ + Metrics: MetricsConfig{ + K8sDaemonsetCurrentScheduledNodes: MetricConfig{Enabled: false}, + K8sDaemonsetDesiredScheduledNodes: MetricConfig{Enabled: false}, + K8sDaemonsetMisscheduledNodes: MetricConfig{Enabled: false}, + K8sDaemonsetReadyNodes: MetricConfig{Enabled: false}, + }, + ResourceAttributes: ResourceAttributesConfig{ + K8sDaemonsetName: ResourceAttributeConfig{Enabled: false}, + K8sDaemonsetUID: ResourceAttributeConfig{Enabled: false}, + K8sNamespaceName: ResourceAttributeConfig{Enabled: false}, + OpencensusResourcetype: ResourceAttributeConfig{Enabled: false}, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := loadMetricsBuilderConfig(t, tt.name) + if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(MetricConfig{}, ResourceAttributeConfig{})); diff != "" { + 
t.Errorf("Config mismatch (-expected +actual):\n%s", diff) + } + }) + } +} + +func loadMetricsBuilderConfig(t *testing.T, name string) MetricsBuilderConfig { + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config.yaml")) + require.NoError(t, err) + sub, err := cm.Sub(name) + require.NoError(t, err) + cfg := DefaultMetricsBuilderConfig() + require.NoError(t, component.UnmarshalConfig(sub, &cfg)) + return cfg +} diff --git a/receiver/k8sclusterreceiver/internal/demonset/internal/metadata/generated_metrics.go b/receiver/k8sclusterreceiver/internal/demonset/internal/metadata/generated_metrics.go new file mode 100644 index 000000000000..11457703d18d --- /dev/null +++ b/receiver/k8sclusterreceiver/internal/demonset/internal/metadata/generated_metrics.go @@ -0,0 +1,386 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "time" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/receiver" + conventions "go.opentelemetry.io/collector/semconv/v1.18.0" +) + +type metricK8sDaemonsetCurrentScheduledNodes struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills k8s.daemonset.current_scheduled_nodes metric with initial data. 
+func (m *metricK8sDaemonsetCurrentScheduledNodes) init() { + m.data.SetName("k8s.daemonset.current_scheduled_nodes") + m.data.SetDescription("Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricK8sDaemonsetCurrentScheduledNodes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricK8sDaemonsetCurrentScheduledNodes) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricK8sDaemonsetCurrentScheduledNodes) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricK8sDaemonsetCurrentScheduledNodes(cfg MetricConfig) metricK8sDaemonsetCurrentScheduledNodes { + m := metricK8sDaemonsetCurrentScheduledNodes{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricK8sDaemonsetDesiredScheduledNodes struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills k8s.daemonset.desired_scheduled_nodes metric with initial data. 
+func (m *metricK8sDaemonsetDesiredScheduledNodes) init() { + m.data.SetName("k8s.daemonset.desired_scheduled_nodes") + m.data.SetDescription("Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod)") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricK8sDaemonsetDesiredScheduledNodes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricK8sDaemonsetDesiredScheduledNodes) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricK8sDaemonsetDesiredScheduledNodes) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricK8sDaemonsetDesiredScheduledNodes(cfg MetricConfig) metricK8sDaemonsetDesiredScheduledNodes { + m := metricK8sDaemonsetDesiredScheduledNodes{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricK8sDaemonsetMisscheduledNodes struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills k8s.daemonset.misscheduled_nodes metric with initial data. 
+func (m *metricK8sDaemonsetMisscheduledNodes) init() { + m.data.SetName("k8s.daemonset.misscheduled_nodes") + m.data.SetDescription("Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricK8sDaemonsetMisscheduledNodes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricK8sDaemonsetMisscheduledNodes) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricK8sDaemonsetMisscheduledNodes) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricK8sDaemonsetMisscheduledNodes(cfg MetricConfig) metricK8sDaemonsetMisscheduledNodes { + m := metricK8sDaemonsetMisscheduledNodes{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricK8sDaemonsetReadyNodes struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills k8s.daemonset.ready_nodes metric with initial data. 
+func (m *metricK8sDaemonsetReadyNodes) init() { + m.data.SetName("k8s.daemonset.ready_nodes") + m.data.SetDescription("Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricK8sDaemonsetReadyNodes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricK8sDaemonsetReadyNodes) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricK8sDaemonsetReadyNodes) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricK8sDaemonsetReadyNodes(cfg MetricConfig) metricK8sDaemonsetReadyNodes { + m := metricK8sDaemonsetReadyNodes{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations +// required to produce metric representation defined in metadata and user config. +type MetricsBuilder struct { + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. 
+ buildInfo component.BuildInfo // contains version information + resourceAttributesConfig ResourceAttributesConfig + metricK8sDaemonsetCurrentScheduledNodes metricK8sDaemonsetCurrentScheduledNodes + metricK8sDaemonsetDesiredScheduledNodes metricK8sDaemonsetDesiredScheduledNodes + metricK8sDaemonsetMisscheduledNodes metricK8sDaemonsetMisscheduledNodes + metricK8sDaemonsetReadyNodes metricK8sDaemonsetReadyNodes +} + +// metricBuilderOption applies changes to default metrics builder. +type metricBuilderOption func(*MetricsBuilder) + +// WithStartTime sets startTime on the metrics builder. +func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { + return func(mb *MetricsBuilder) { + mb.startTime = startTime + } +} + +func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSettings, options ...metricBuilderOption) *MetricsBuilder { + mb := &MetricsBuilder{ + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), + buildInfo: settings.BuildInfo, + resourceAttributesConfig: mbc.ResourceAttributes, + metricK8sDaemonsetCurrentScheduledNodes: newMetricK8sDaemonsetCurrentScheduledNodes(mbc.Metrics.K8sDaemonsetCurrentScheduledNodes), + metricK8sDaemonsetDesiredScheduledNodes: newMetricK8sDaemonsetDesiredScheduledNodes(mbc.Metrics.K8sDaemonsetDesiredScheduledNodes), + metricK8sDaemonsetMisscheduledNodes: newMetricK8sDaemonsetMisscheduledNodes(mbc.Metrics.K8sDaemonsetMisscheduledNodes), + metricK8sDaemonsetReadyNodes: newMetricK8sDaemonsetReadyNodes(mbc.Metrics.K8sDaemonsetReadyNodes), + } + for _, op := range options { + op(mb) + } + return mb +} + +// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. 
+func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { + if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() { + mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len() + } + if mb.resourceCapacity < rm.Resource().Attributes().Len() { + mb.resourceCapacity = rm.Resource().Attributes().Len() + } +} + +// ResourceMetricsOption applies changes to provided resource metrics. +type ResourceMetricsOption func(ResourceAttributesConfig, pmetric.ResourceMetrics) + +// WithK8sDaemonsetName sets provided value as "k8s.daemonset.name" attribute for current resource. +func WithK8sDaemonsetName(val string) ResourceMetricsOption { + return func(rac ResourceAttributesConfig, rm pmetric.ResourceMetrics) { + if rac.K8sDaemonsetName.Enabled { + rm.Resource().Attributes().PutStr("k8s.daemonset.name", val) + } + } +} + +// WithK8sDaemonsetUID sets provided value as "k8s.daemonset.uid" attribute for current resource. +func WithK8sDaemonsetUID(val string) ResourceMetricsOption { + return func(rac ResourceAttributesConfig, rm pmetric.ResourceMetrics) { + if rac.K8sDaemonsetUID.Enabled { + rm.Resource().Attributes().PutStr("k8s.daemonset.uid", val) + } + } +} + +// WithK8sNamespaceName sets provided value as "k8s.namespace.name" attribute for current resource. +func WithK8sNamespaceName(val string) ResourceMetricsOption { + return func(rac ResourceAttributesConfig, rm pmetric.ResourceMetrics) { + if rac.K8sNamespaceName.Enabled { + rm.Resource().Attributes().PutStr("k8s.namespace.name", val) + } + } +} + +// WithOpencensusResourcetype sets provided value as "opencensus.resourcetype" attribute for current resource. 
+func WithOpencensusResourcetype(val string) ResourceMetricsOption { + return func(rac ResourceAttributesConfig, rm pmetric.ResourceMetrics) { + if rac.OpencensusResourcetype.Enabled { + rm.Resource().Attributes().PutStr("opencensus.resourcetype", val) + } + } +} + +// WithStartTimeOverride overrides start time for all the resource metrics data points. +// This option should be only used if different start time has to be set on metrics coming from different resources. +func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { + return func(_ ResourceAttributesConfig, rm pmetric.ResourceMetrics) { + var dps pmetric.NumberDataPointSlice + metrics := rm.ScopeMetrics().At(0).Metrics() + for i := 0; i < metrics.Len(); i++ { + switch metrics.At(i).Type() { + case pmetric.MetricTypeGauge: + dps = metrics.At(i).Gauge().DataPoints() + case pmetric.MetricTypeSum: + dps = metrics.At(i).Sum().DataPoints() + } + for j := 0; j < dps.Len(); j++ { + dps.At(j).SetStartTimestamp(start) + } + } + } +} + +// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for +// recording another set of data points as part of another resource. This function can be helpful when one scraper +// needs to emit metrics from several resources. Otherwise calling this function is not required, +// just `Emit` function can be called instead. +// Resource attributes should be provided as ResourceMetricsOption arguments. 
+func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { + rm := pmetric.NewResourceMetrics() + rm.SetSchemaUrl(conventions.SchemaURL) + rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) + ils := rm.ScopeMetrics().AppendEmpty() + ils.Scope().SetName("otelcol/k8sclusterreceiver") + ils.Scope().SetVersion(mb.buildInfo.Version) + ils.Metrics().EnsureCapacity(mb.metricsCapacity) + mb.metricK8sDaemonsetCurrentScheduledNodes.emit(ils.Metrics()) + mb.metricK8sDaemonsetDesiredScheduledNodes.emit(ils.Metrics()) + mb.metricK8sDaemonsetMisscheduledNodes.emit(ils.Metrics()) + mb.metricK8sDaemonsetReadyNodes.emit(ils.Metrics()) + + for _, op := range rmo { + op(mb.resourceAttributesConfig, rm) + } + if ils.Metrics().Len() > 0 { + mb.updateCapacity(rm) + rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty()) + } +} + +// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for +// recording another set of metrics. This function will be responsible for applying all the transformations required to +// produce metric representation defined in metadata and user config, e.g. delta or cumulative. +func (mb *MetricsBuilder) Emit(rmo ...ResourceMetricsOption) pmetric.Metrics { + mb.EmitForResource(rmo...) + metrics := mb.metricsBuffer + mb.metricsBuffer = pmetric.NewMetrics() + return metrics +} + +// RecordK8sDaemonsetCurrentScheduledNodesDataPoint adds a data point to k8s.daemonset.current_scheduled_nodes metric. +func (mb *MetricsBuilder) RecordK8sDaemonsetCurrentScheduledNodesDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricK8sDaemonsetCurrentScheduledNodes.recordDataPoint(mb.startTime, ts, val) +} + +// RecordK8sDaemonsetDesiredScheduledNodesDataPoint adds a data point to k8s.daemonset.desired_scheduled_nodes metric. 
+func (mb *MetricsBuilder) RecordK8sDaemonsetDesiredScheduledNodesDataPoint(ts pcommon.Timestamp, val int64) {
+	mb.metricK8sDaemonsetDesiredScheduledNodes.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sDaemonsetMisscheduledNodesDataPoint adds a data point to k8s.daemonset.misscheduled_nodes metric.
+func (mb *MetricsBuilder) RecordK8sDaemonsetMisscheduledNodesDataPoint(ts pcommon.Timestamp, val int64) {
+	mb.metricK8sDaemonsetMisscheduledNodes.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordK8sDaemonsetReadyNodesDataPoint adds a data point to k8s.daemonset.ready_nodes metric.
+func (mb *MetricsBuilder) RecordK8sDaemonsetReadyNodesDataPoint(ts pcommon.Timestamp, val int64) {
+	mb.metricK8sDaemonsetReadyNodes.recordDataPoint(mb.startTime, ts, val)
+}
+
+// Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted,
+// and metrics builder should update its startTime and reset its internal state accordingly.
+func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) {
+	mb.startTime = pcommon.NewTimestampFromTime(time.Now())
+	for _, op := range options {
+		op(mb)
+	}
+}
diff --git a/receiver/k8sclusterreceiver/internal/demonset/internal/metadata/generated_metrics_test.go b/receiver/k8sclusterreceiver/internal/demonset/internal/metadata/generated_metrics_test.go
new file mode 100644
index 000000000000..e96e6c0c62d3
--- /dev/null
+++ b/receiver/k8sclusterreceiver/internal/demonset/internal/metadata/generated_metrics_test.go
@@ -0,0 +1,178 @@
+// Code generated by mdatagen. DO NOT EDIT.
+ +package metadata + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/receiver/receivertest" + "go.uber.org/zap" + "go.uber.org/zap/zaptest/observer" +) + +type testConfigCollection int + +const ( + testSetDefault testConfigCollection = iota + testSetAll + testSetNone +) + +func TestMetricsBuilder(t *testing.T) { + tests := []struct { + name string + configSet testConfigCollection + }{ + { + name: "default", + configSet: testSetDefault, + }, + { + name: "all_set", + configSet: testSetAll, + }, + { + name: "none_set", + configSet: testSetNone, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + start := pcommon.Timestamp(1_000_000_000) + ts := pcommon.Timestamp(1_000_001_000) + observedZapCore, observedLogs := observer.New(zap.WarnLevel) + settings := receivertest.NewNopCreateSettings() + settings.Logger = zap.New(observedZapCore) + mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, test.name), settings, WithStartTime(start)) + + expectedWarnings := 0 + assert.Equal(t, expectedWarnings, observedLogs.Len()) + + defaultMetricsCount := 0 + allMetricsCount := 0 + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordK8sDaemonsetCurrentScheduledNodesDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordK8sDaemonsetDesiredScheduledNodesDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordK8sDaemonsetMisscheduledNodesDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordK8sDaemonsetReadyNodesDataPoint(ts, 1) + + metrics := mb.Emit(WithK8sDaemonsetName("k8s.daemonset.name-val"), WithK8sDaemonsetUID("k8s.daemonset.uid-val"), WithK8sNamespaceName("k8s.namespace.name-val"), WithOpencensusResourcetype("opencensus.resourcetype-val")) + + if test.configSet == testSetNone { + assert.Equal(t, 0, metrics.ResourceMetrics().Len()) + return + } + + 
assert.Equal(t, 1, metrics.ResourceMetrics().Len()) + rm := metrics.ResourceMetrics().At(0) + attrCount := 0 + enabledAttrCount := 0 + attrVal, ok := rm.Resource().Attributes().Get("k8s.daemonset.name") + attrCount++ + assert.Equal(t, mb.resourceAttributesConfig.K8sDaemonsetName.Enabled, ok) + if mb.resourceAttributesConfig.K8sDaemonsetName.Enabled { + enabledAttrCount++ + assert.EqualValues(t, "k8s.daemonset.name-val", attrVal.Str()) + } + attrVal, ok = rm.Resource().Attributes().Get("k8s.daemonset.uid") + attrCount++ + assert.Equal(t, mb.resourceAttributesConfig.K8sDaemonsetUID.Enabled, ok) + if mb.resourceAttributesConfig.K8sDaemonsetUID.Enabled { + enabledAttrCount++ + assert.EqualValues(t, "k8s.daemonset.uid-val", attrVal.Str()) + } + attrVal, ok = rm.Resource().Attributes().Get("k8s.namespace.name") + attrCount++ + assert.Equal(t, mb.resourceAttributesConfig.K8sNamespaceName.Enabled, ok) + if mb.resourceAttributesConfig.K8sNamespaceName.Enabled { + enabledAttrCount++ + assert.EqualValues(t, "k8s.namespace.name-val", attrVal.Str()) + } + attrVal, ok = rm.Resource().Attributes().Get("opencensus.resourcetype") + attrCount++ + assert.Equal(t, mb.resourceAttributesConfig.OpencensusResourcetype.Enabled, ok) + if mb.resourceAttributesConfig.OpencensusResourcetype.Enabled { + enabledAttrCount++ + assert.EqualValues(t, "opencensus.resourcetype-val", attrVal.Str()) + } + assert.Equal(t, enabledAttrCount, rm.Resource().Attributes().Len()) + assert.Equal(t, attrCount, 4) + + assert.Equal(t, 1, rm.ScopeMetrics().Len()) + ms := rm.ScopeMetrics().At(0).Metrics() + if test.configSet == testSetDefault { + assert.Equal(t, defaultMetricsCount, ms.Len()) + } + if test.configSet == testSetAll { + assert.Equal(t, allMetricsCount, ms.Len()) + } + validatedMetrics := make(map[string]bool) + for i := 0; i < ms.Len(); i++ { + switch ms.At(i).Name() { + case "k8s.daemonset.current_scheduled_nodes": + assert.False(t, validatedMetrics["k8s.daemonset.current_scheduled_nodes"], "Found a 
duplicate in the metrics slice: k8s.daemonset.current_scheduled_nodes") + validatedMetrics["k8s.daemonset.current_scheduled_nodes"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "k8s.daemonset.desired_scheduled_nodes": + assert.False(t, validatedMetrics["k8s.daemonset.desired_scheduled_nodes"], "Found a duplicate in the metrics slice: k8s.daemonset.desired_scheduled_nodes") + validatedMetrics["k8s.daemonset.desired_scheduled_nodes"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod)", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "k8s.daemonset.misscheduled_nodes": + assert.False(t, validatedMetrics["k8s.daemonset.misscheduled_nodes"], "Found a duplicate in the metrics slice: k8s.daemonset.misscheduled_nodes") + validatedMetrics["k8s.daemonset.misscheduled_nodes"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod", 
ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "k8s.daemonset.ready_nodes": + assert.False(t, validatedMetrics["k8s.daemonset.ready_nodes"], "Found a duplicate in the metrics slice: k8s.daemonset.ready_nodes") + validatedMetrics["k8s.daemonset.ready_nodes"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + } + } + }) + } +} diff --git a/receiver/k8sclusterreceiver/internal/demonset/internal/metadata/testdata/config.yaml b/receiver/k8sclusterreceiver/internal/demonset/internal/metadata/testdata/config.yaml new file mode 100644 index 000000000000..1a2eea8c9efa --- /dev/null +++ b/receiver/k8sclusterreceiver/internal/demonset/internal/metadata/testdata/config.yaml @@ -0,0 +1,39 @@ +default: +all_set: + metrics: + k8s.daemonset.current_scheduled_nodes: + enabled: true + k8s.daemonset.desired_scheduled_nodes: + enabled: true + k8s.daemonset.misscheduled_nodes: + enabled: true + k8s.daemonset.ready_nodes: + enabled: true + resource_attributes: + k8s.daemonset.name: + enabled: true + k8s.daemonset.uid: + enabled: true + k8s.namespace.name: + enabled: true + opencensus.resourcetype: + enabled: true +none_set: + metrics: + k8s.daemonset.current_scheduled_nodes: + enabled: false + 
k8s.daemonset.desired_scheduled_nodes: + enabled: false + k8s.daemonset.misscheduled_nodes: + enabled: false + k8s.daemonset.ready_nodes: + enabled: false + resource_attributes: + k8s.daemonset.name: + enabled: false + k8s.daemonset.uid: + enabled: false + k8s.namespace.name: + enabled: false + opencensus.resourcetype: + enabled: false diff --git a/receiver/k8sclusterreceiver/internal/demonset/metadata.yaml b/receiver/k8sclusterreceiver/internal/demonset/metadata.yaml new file mode 100644 index 000000000000..57f55b69dd11 --- /dev/null +++ b/receiver/k8sclusterreceiver/internal/demonset/metadata.yaml @@ -0,0 +1,52 @@ +type: k8s/daemonset + +sem_conv_version: 1.18.0 + +parent: k8s_cluster + +resource_attributes: + k8s.daemonset.name: + description: The k8s daemonset name. + type: string + enabled: true + + k8s.daemonset.uid: + description: The k8s daemonset uid. + type: string + enabled: true + + k8s.namespace.name: + description: The k8s namespace name. + type: string + enabled: true + + opencensus.resourcetype: + description: The OpenCensus resource type. 
+ type: string + enabled: true + +metrics: + k8s.daemonset.current_scheduled_nodes: + enabled: true + description: Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod + unit: "1" + gauge: + value_type: int + k8s.daemonset.desired_scheduled_nodes: + enabled: true + description: Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod) + unit: "1" + gauge: + value_type: int + k8s.daemonset.misscheduled_nodes: + enabled: true + description: Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod + unit: "1" + gauge: + value_type: int + k8s.daemonset.ready_nodes: + enabled: true + description: Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready + unit: "1" + gauge: + value_type: int diff --git a/receiver/k8sclusterreceiver/internal/demonset/testdata/expected.yaml b/receiver/k8sclusterreceiver/internal/demonset/testdata/expected.yaml new file mode 100644 index 000000000000..c5dafb3a7824 --- /dev/null +++ b/receiver/k8sclusterreceiver/internal/demonset/testdata/expected.yaml @@ -0,0 +1,45 @@ +resourceMetrics: + - resource: + attributes: + - key: k8s.namespace.name + value: + stringValue: test-namespace + - key: k8s.daemonset.name + value: + stringValue: test-daemonset-1 + - key: k8s.daemonset.uid + value: + stringValue: test-daemonset-1-uid + - key: opencensus.resourcetype + value: + stringValue: k8s + schemaUrl: https://opentelemetry.io/schemas/1.18.0 + scopeMetrics: + - metrics: + - description: Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod + gauge: + dataPoints: + - asInt: "3" + name: k8s.daemonset.current_scheduled_nodes + unit: "1" + - description: Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod) + gauge: + dataPoints: + - asInt: "5" + name: k8s.daemonset.desired_scheduled_nodes + 
unit: "1" + - description: Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod + gauge: + dataPoints: + - asInt: "1" + name: k8s.daemonset.misscheduled_nodes + unit: "1" + - description: Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready + gauge: + dataPoints: + - asInt: "2" + name: k8s.daemonset.ready_nodes + unit: "1" + scope: + name: otelcol/k8sclusterreceiver + version: latest diff --git a/receiver/k8sclusterreceiver/testdata/e2e/expected.yaml b/receiver/k8sclusterreceiver/testdata/e2e/expected.yaml index 56ebbdd0ca39..63ec1c246b39 100644 --- a/receiver/k8sclusterreceiver/testdata/e2e/expected.yaml +++ b/receiver/k8sclusterreceiver/testdata/e2e/expected.yaml @@ -157,6 +157,7 @@ resourceMetrics: - key: opencensus.resourcetype value: stringValue: k8s + schemaUrl: "https://opentelemetry.io/schemas/1.18.0" scopeMetrics: - metrics: - description: Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod @@ -187,7 +188,9 @@ resourceMetrics: timeUnixNano: "1686772769034865545" name: k8s.daemonset.ready_nodes unit: "1" - scope: {} + scope: + name: otelcol/k8sclusterreceiver + version: latest - resource: attributes: - key: k8s.daemonset.name @@ -202,6 +205,7 @@ resourceMetrics: - key: opencensus.resourcetype value: stringValue: k8s + schemaUrl: "https://opentelemetry.io/schemas/1.18.0" scopeMetrics: - metrics: - description: Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod @@ -232,7 +236,9 @@ resourceMetrics: timeUnixNano: "1686772769034865545" name: k8s.daemonset.ready_nodes unit: "1" - scope: {} + scope: + name: otelcol/k8sclusterreceiver + version: latest - resource: attributes: - key: k8s.deployment.name