From bbcbe3cfbfee86d36b79e7c80a0f1d7e8acfbb66 Mon Sep 17 00:00:00 2001 From: Anthony Mirabella Date: Wed, 8 May 2024 11:58:37 -0400 Subject: [PATCH 01/55] Move Aneurysm9 to emeritus status (#32943) I have been unable to provide this position the bandwidth that it deserves and it is time to formalize recognition of that fact. Signed-off-by: Anthony J Mirabella --- .github/auto_assign.yml | 1 - CONTRIBUTING.md | 1 - README.md | 2 +- 3 files changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/auto_assign.yml b/.github/auto_assign.yml index 523388fff897..2b5c2f30e72e 100644 --- a/.github/auto_assign.yml +++ b/.github/auto_assign.yml @@ -11,7 +11,6 @@ useAssigneeGroups: true assigneeGroups: approvers_maintainers: # Approvers - - Aneurysm9 - atoulme - bryan-aguilar - crobert-1 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a671b1d97619..6643c475eb00 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -253,7 +253,6 @@ The following GitHub users are the currently available sponsors, either by being * [@crobert-1](https://github.com/crobert-1) * [@djaglowski](https://github.com/djaglowski) * [@codeboten](https://github.com/codeboten) -* [@Aneurysm9](https://github.com/Aneurysm9) * [@mx-psi](https://github.com/mx-psi) * [@dmitryax](https://github.com/dmitryax) * [@evan-bradley](https://github.com/evan-bradley) diff --git a/README.md b/README.md index 60bdd2d11647..3b221077ede0 100644 --- a/README.md +++ b/README.md @@ -85,7 +85,6 @@ Emeritus Triagers: Approvers ([@open-telemetry/collector-contrib-approvers](https://github.com/orgs/open-telemetry/teams/collector-contrib-approvers)): -- [Anthony Mirabella](https://github.com/Aneurysm9), AWS - [Antoine Toulme](https://github.com/atoulme), Splunk - [Bryan Aguilar](https://github.com/bryan-aguilar), AWS - [Curtis Robert](https://github.com/crobert-1), Splunk @@ -97,6 +96,7 @@ Emeritus Approvers: - [Przemek Maciolek](https://github.com/pmm-sumo) - [Ruslan Kovalov](https://github.com/kovrus) +- [Anthony Mirabella](https://github.com/Aneurysm9), AWS Maintainers ([@open-telemetry/collector-contrib-maintainer](https://github.com/orgs/open-telemetry/teams/collector-contrib-maintainer)): From 097c74557b4d1f5569397ea5204549e8953af043 Mon Sep 17 00:00:00 2001 From: Curtis Robert Date: Wed, 8 May 2024 09:32:00 -0700 Subject: [PATCH 02/55] [receiver/googlecloudpubsubreceiver] Fix memory leak during shutdown (#32361) **Description:** This PR contains the following changes: 1. Add `Close` call to the receiver's GRPC client. Without this, goroutines were being leaked on shutdown. 2. Change `grpc.Dial` -> `grpc.NewClient`. They offer the same functionality, but `Dial` is being deprecated in favor of `NewClient`. 3. Enable `goleak` checks on this receiver to help ensure no goroutines are being leaked. 4. Change a couple `Assert.Nil` calls to `Assert.NoError`. The output of `NoError` includes the error message if hit, `Nil` simply includes the object's address, i.e. `&status.Error{s:(*status.Status)(0xc00007e158)}` **Link to tracking Issue:** #30438 **Testing:** All existing tests are passing, as well as added goleak check. 
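A minimal sketch of the shutdown pattern this change introduces in `receiver.go` below (simplified and standalone; `shutdownClient` and the bare `io.Closer` are hypothetical stand-ins for the receiver and its Pub/Sub client): a gRPC `Canceled` status from `Close` means the connection is already closed, so repeated `Shutdown` calls stay error-free.

```go
package main

import (
	"fmt"
	"io"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// shutdownClient is a simplified stand-in for the receiver's Shutdown logic:
// close the underlying client, but treat a Canceled status as "already
// closed" so calling Shutdown more than once does not surface an error.
func shutdownClient(c io.Closer) error {
	if c == nil {
		return nil
	}
	if err := c.Close(); status.Code(err) != codes.Canceled {
		return err // nil when Close succeeded, the real error otherwise
	}
	return nil
}

func main() {
	fmt.Println(shutdownClient(nil)) // <nil>
}
```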
--- .chloggen/goleak_googlepubsubrec.yaml | 27 +++++++++++++++++++ .../generated_package_test.go | 4 ++- .../googlecloudpubsubreceiver/metadata.yaml | 5 +++- .../googlecloudpubsubreceiver/receiver.go | 16 ++++++++--- .../receiver_test.go | 4 +-- 5 files changed, 49 insertions(+), 7 deletions(-) create mode 100644 .chloggen/goleak_googlepubsubrec.yaml diff --git a/.chloggen/goleak_googlepubsubrec.yaml b/.chloggen/goleak_googlepubsubrec.yaml new file mode 100644 index 000000000000..4e994677e896 --- /dev/null +++ b/.chloggen/goleak_googlepubsubrec.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: bug_fix + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: googlecloudpubsubreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Fix memory leak during shutdown + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [32361] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/receiver/googlecloudpubsubreceiver/generated_package_test.go b/receiver/googlecloudpubsubreceiver/generated_package_test.go index 9a70013ef296..0bde1b757a42 100644 --- a/receiver/googlecloudpubsubreceiver/generated_package_test.go +++ b/receiver/googlecloudpubsubreceiver/generated_package_test.go @@ -4,8 +4,10 @@ package googlecloudpubsubreceiver import ( "testing" + + "go.uber.org/goleak" ) func TestMain(m *testing.M) { - // skipping goleak test as per metadata.yml configuration + goleak.VerifyTestMain(m, goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start")) } diff --git a/receiver/googlecloudpubsubreceiver/metadata.yaml b/receiver/googlecloudpubsubreceiver/metadata.yaml index 9930f9763546..f4517dffd812 100644 --- a/receiver/googlecloudpubsubreceiver/metadata.yaml +++ b/receiver/googlecloudpubsubreceiver/metadata.yaml @@ -18,5 +18,8 @@ tests: skip_lifecycle: true skip_shutdown: true goleak: - skip: true + skip: false + ignore: + # See https://github.com/census-instrumentation/opencensus-go/issues/1191 for more information. 
+ top: go.opencensus.io/stats/view.(*worker).start diff --git a/receiver/googlecloudpubsubreceiver/receiver.go b/receiver/googlecloudpubsubreceiver/receiver.go index 02f9c1964843..504546201e2e 100644 --- a/receiver/googlecloudpubsubreceiver/receiver.go +++ b/receiver/googlecloudpubsubreceiver/receiver.go @@ -26,7 +26,9 @@ import ( "go.uber.org/zap" "google.golang.org/api/option" "google.golang.org/grpc" + "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/status" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudpubsubreceiver/internal" ) @@ -76,7 +78,7 @@ func (receiver *pubsubReceiver) generateClientOptions() (copts []option.ClientOp if receiver.userAgent != "" { dialOpts = append(dialOpts, grpc.WithUserAgent(receiver.userAgent)) } - conn, _ := grpc.Dial(receiver.config.Endpoint, append(dialOpts, grpc.WithTransportCredentials(insecure.NewCredentials()))...) + conn, _ := grpc.NewClient(receiver.config.Endpoint, append(dialOpts, grpc.WithTransportCredentials(insecure.NewCredentials()))...) copts = append(copts, option.WithGRPCConn(conn)) } else { copts = append(copts, option.WithEndpoint(receiver.config.Endpoint)) @@ -113,13 +115,21 @@ func (receiver *pubsubReceiver) Start(ctx context.Context, _ component.Host) err } func (receiver *pubsubReceiver) Shutdown(_ context.Context) error { + var err error + if receiver.client != nil { + // A canceled code means the client connection is already closed, + // Shutdown shouldn't return an error in that case. + if closeErr := receiver.client.Close(); status.Code(closeErr) != codes.Canceled { + err = closeErr + } + } if receiver.handler == nil { - return nil + return err } receiver.logger.Info("Stopping Google Pubsub receiver") receiver.handler.CancelNow() receiver.logger.Info("Stopped Google Pubsub receiver") - return nil + return err } func (receiver *pubsubReceiver) handleLogStrings(ctx context.Context, message *pubsubpb.ReceivedMessage) error { diff --git a/receiver/googlecloudpubsubreceiver/receiver_test.go b/receiver/googlecloudpubsubreceiver/receiver_test.go index b355a3818237..8e428d63d705 100644 --- a/receiver/googlecloudpubsubreceiver/receiver_test.go +++ b/receiver/googlecloudpubsubreceiver/receiver_test.go @@ -153,6 +153,6 @@ func TestReceiver(t *testing.T) { return len(logSink.AllLogs()) == 1 }, time.Second, 10*time.Millisecond) - assert.Nil(t, receiver.Shutdown(ctx)) - assert.Nil(t, receiver.Shutdown(ctx)) + assert.NoError(t, receiver.Shutdown(ctx)) + assert.NoError(t, receiver.Shutdown(ctx)) } From 3e868a41264da17cba9a7123709b90b3effe19d3 Mon Sep 17 00:00:00 2001 From: "James Hughes (Splunk)" Date: Wed, 8 May 2024 09:35:40 -0700 Subject: [PATCH 03/55] [chore] Add some docs to readme regarding file exporter (#32855) Admittedly I tested this via docker-compose. I can expand to having a full docker-compose file with telemetry gen if that's preferred. --------- Co-authored-by: Alex Boten <223565+codeboten@users.noreply.github.com> --- exporter/fileexporter/README.md | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/exporter/fileexporter/README.md b/exporter/fileexporter/README.md index fb4a31590eb5..ec550717b6d9 100644 --- a/exporter/fileexporter/README.md +++ b/exporter/fileexporter/README.md @@ -26,7 +26,19 @@ Exporter supports the following features: Please note that there is no guarantee that exact field names will remain stable. 
The official [opentelemetry-collector-contrib container](https://hub.docker.com/r/otel/opentelemetry-collector-contrib/tags#!) does not have a writable filesystem by default since it's built on the `scratch` layer. -As such, you will need to create a writable directory for the path, potentially by mounting writable volumes or creating a custom image. +As such, you will need to create a writable directory for the path. You could do this by [mounting a volume](https://docs.docker.com/storage/volumes/#choose-the--v-or---mount-flag) with flags such as `rw` or `rwZ`. + +On Linux, and given a `otel-collector-config.yaml` with a `file` exporter whose path is prefixed with `/file-exporter`, +```bash +# linux needs +x to list a directory. You can use a+ instead of o+ for the mode if you want to ensure your user and group has access. +mkdir --mode o+rwx file-exporter +# z is an SELinux construct that is ignored on other systems +docker run -v "./file-exporter:/file-exporter:rwz" -v "otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml" otel/opentelemetry-collector-contrib:latest +``` +Note this same syntax for volumes will work with docker-compose. + +You could also modify the base image and manually build your own container to have a writeable directory or change the runas uid if needed, but this is more involved. + ## Configuration options: The following settings are required: From 89d09e0ba97df2eca607d017741113a292f916bd Mon Sep 17 00:00:00 2001 From: Stefan Kurek Date: Wed, 8 May 2024 12:42:09 -0400 Subject: [PATCH 04/55] [receiver/vcenter] Fixes Cluster Resource Attributes for Datastore Resource (#32687) **Description:** Removed the `vcenter.cluster.name` resource attribute from all Datastore resources. **Link to tracking Issue:** #32674 **Testing:** Unit/integration tests updated and tested. Local environment tested. **Documentation:** New documentation generated based on the metadata. --------- Co-authored-by: Daniel Jaglowski Co-authored-by: Curtis Robert --- .../fix_vcenter-datastore-attributes.yaml | 32 ++++++++++++++ receiver/vcenterreceiver/client.go | 10 +++++ receiver/vcenterreceiver/client_test.go | 15 +++++++ receiver/vcenterreceiver/documentation.md | 2 +- .../internal/mockserver/client_mock.go | 3 ++ receiver/vcenterreceiver/metadata.yaml | 2 +- receiver/vcenterreceiver/scraper.go | 18 +++----- .../testdata/integration/expected.yaml | 41 ----------------- .../metrics/expected-all-enabled.yaml | 44 ------------------- .../testdata/metrics/expected.yaml | 41 ----------------- 10 files changed, 69 insertions(+), 139 deletions(-) create mode 100644 .chloggen/fix_vcenter-datastore-attributes.yaml diff --git a/.chloggen/fix_vcenter-datastore-attributes.yaml b/.chloggen/fix_vcenter-datastore-attributes.yaml new file mode 100644 index 000000000000..d6d3dcfc4a34 --- /dev/null +++ b/.chloggen/fix_vcenter-datastore-attributes.yaml @@ -0,0 +1,32 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: breaking + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: vcenterreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: "Removes `vcenter.cluster.name` attribute from `vcenter.datastore` metrics" + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. 
+issues: [32674] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: | + If there were multiple Clusters, Datastore metrics were being repeated under Resources differentiated with a + `vcenter.cluster.name` resource attribute. In the same vein, if there were standalone Hosts, in addition to + clusters the metrics would be repeated under a Resource without the `vcenter.cluster.name` attribute. Now there + will only be a single set of metrics for one Datastore (as there should be, as Datastores don't be long to + Clusters). + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/receiver/vcenterreceiver/client.go b/receiver/vcenterreceiver/client.go index f3ae2f212217..ecde4291c868 100644 --- a/receiver/vcenterreceiver/client.go +++ b/receiver/vcenterreceiver/client.go @@ -94,6 +94,16 @@ func (vc *vcenterClient) Datacenters(ctx context.Context) ([]*object.Datacenter, return datacenters, nil } +// Datastores returns the Datastores of the vSphere SDK for a given datacenter +func (vc *vcenterClient) Datastores(ctx context.Context, datacenter *object.Datacenter) ([]*object.Datastore, error) { + vc.finder = vc.finder.SetDatacenter(datacenter) + datastores, err := vc.finder.DatastoreList(ctx, "*") + if err != nil { + return []*object.Datastore{}, fmt.Errorf("unable to get datastores: %w", err) + } + return datastores, nil +} + // Computes returns the ComputeResources (and ClusterComputeResources) of the vSphere SDK for a given datacenter func (vc *vcenterClient) Computes(ctx context.Context, datacenter *object.Datacenter) ([]*object.ComputeResource, error) { vc.finder = vc.finder.SetDatacenter(datacenter) diff --git a/receiver/vcenterreceiver/client_test.go b/receiver/vcenterreceiver/client_test.go index 426a84e9f541..ebd43b07c8a8 100644 --- a/receiver/vcenterreceiver/client_test.go +++ b/receiver/vcenterreceiver/client_test.go @@ -34,6 +34,21 @@ func TestGetComputes(t *testing.T) { }) } +func TestGetDatastores(t *testing.T) { + simulator.Test(func(ctx context.Context, c *vim25.Client) { + finder := find.NewFinder(c) + client := vcenterClient{ + vimDriver: c, + finder: finder, + } + dc, err := finder.DefaultDatacenter(ctx) + require.NoError(t, err) + datastores, err := client.Datastores(ctx, dc) + require.NoError(t, err) + require.NotEmpty(t, datastores, 0) + }) +} + func TestGetResourcePools(t *testing.T) { simulator.Test(func(ctx context.Context, c *vim25.Client) { finder := find.NewFinder(c) diff --git a/receiver/vcenterreceiver/documentation.md b/receiver/vcenterreceiver/documentation.md index 95ccdee031cb..39e17eb3c30c 100644 --- a/receiver/vcenterreceiver/documentation.md +++ b/receiver/vcenterreceiver/documentation.md @@ -529,7 +529,7 @@ As measured over the most recent 20s interval. | Name | Description | Values | Enabled | | ---- | ----------- | ------ | ------- | -| vcenter.cluster.name | The name of the vCenter Cluster. | Any Str | true | +| vcenter.cluster.name | The name of the vCenter cluster. 
| Any Str | true | | vcenter.datacenter.name | The name of the vCenter datacenter. | Any Str | false | | vcenter.datastore.name | The name of the vCenter datastore. | Any Str | true | | vcenter.host.name | The hostname of the vCenter ESXi host. | Any Str | true | diff --git a/receiver/vcenterreceiver/internal/mockserver/client_mock.go b/receiver/vcenterreceiver/internal/mockserver/client_mock.go index a15f63dbc2fe..25fba3fcd5da 100644 --- a/receiver/vcenterreceiver/internal/mockserver/client_mock.go +++ b/receiver/vcenterreceiver/internal/mockserver/client_mock.go @@ -126,6 +126,9 @@ func routeRetreiveProperties(t *testing.T, body map[string]any) ([]byte, error) case content == "datacenter-3" && contentType == "Datacenter": return loadResponse("datacenter-properties.xml") + case content == "group-s6" && contentType == "Folder": + return loadResponse("datastore-properties.xml") + case content == "datastore-1003" && contentType == "Datastore": if objectSetArray { return loadResponse("datastore-list.xml") diff --git a/receiver/vcenterreceiver/metadata.yaml b/receiver/vcenterreceiver/metadata.yaml index de6e8624c04f..c97dc1a0bd12 100644 --- a/receiver/vcenterreceiver/metadata.yaml +++ b/receiver/vcenterreceiver/metadata.yaml @@ -18,7 +18,7 @@ resource_attributes: warnings: if_enabled_not_set: "this attribute will be enabled by default starting in release v0.101.0" vcenter.cluster.name: - description: The name of the vCenter Cluster. + description: The name of the vCenter cluster. enabled: true type: string vcenter.host.name: diff --git a/receiver/vcenterreceiver/scraper.go b/receiver/vcenterreceiver/scraper.go index c32ad386074d..80895bde15e4 100644 --- a/receiver/vcenterreceiver/scraper.go +++ b/receiver/vcenterreceiver/scraper.go @@ -53,7 +53,7 @@ func newVmwareVcenterScraper( settings receiver.CreateSettings, ) *vcenterMetricScraper { client := newVcenterClient(config) - logger.Warn("[WARNING] `vcenter.cluster.name`: this attribute will be removed from the Datastore resource starting in release v0.101.0") + return &vcenterMetricScraper{ client: client, config: config, @@ -123,12 +123,12 @@ func (v *vcenterMetricScraper) collectClusters(ctx context.Context, datacenter * v.collectResourcePools(ctx, now, dcName, computes, errs) for _, c := range computes { v.collectHosts(ctx, now, dcName, c, errs) - v.collectDatastores(ctx, now, dcName, c, errs) poweredOnVMs, poweredOffVMs, suspendedVMs, templates := v.collectVMs(ctx, now, dcName, c, errs) if c.Reference().Type == "ClusterComputeResource" { v.collectCluster(ctx, now, dcName, c, poweredOnVMs, poweredOffVMs, suspendedVMs, templates, errs) } } + v.collectDatastores(ctx, now, datacenter, errs) } func (v *vcenterMetricScraper) collectCluster( @@ -165,19 +165,18 @@ func (v *vcenterMetricScraper) collectCluster( func (v *vcenterMetricScraper) collectDatastores( ctx context.Context, - colTime pcommon.Timestamp, - dcName string, - compute *object.ComputeResource, + ts pcommon.Timestamp, + datacenter *object.Datacenter, errs *scrapererror.ScrapeErrors, ) { - datastores, err := compute.Datastores(ctx) + datastores, err := v.client.Datastores(ctx, datacenter) if err != nil { errs.AddPartial(1, err) return } for _, ds := range datastores { - v.collectDatastore(ctx, colTime, dcName, ds, compute, errs) + v.collectDatastore(ctx, ts, datacenter.Name(), ds, errs) } } @@ -186,7 +185,6 @@ func (v *vcenterMetricScraper) collectDatastore( now pcommon.Timestamp, dcName string, ds *object.Datastore, - compute *object.ComputeResource, errs *scrapererror.ScrapeErrors, 
) { var moDS mo.Datastore @@ -199,10 +197,8 @@ func (v *vcenterMetricScraper) collectDatastore( v.recordDatastoreProperties(now, moDS) rb := v.mb.NewResourceBuilder() rb.SetVcenterDatacenterName(dcName) - if compute.Reference().Type == "ClusterComputeResource" { - rb.SetVcenterClusterName(compute.Name()) - } rb.SetVcenterDatastoreName(moDS.Name) + v.mb.EmitForResource(metadata.WithResource(rb.Emit())) } diff --git a/receiver/vcenterreceiver/testdata/integration/expected.yaml b/receiver/vcenterreceiver/testdata/integration/expected.yaml index 88aa368adfc7..e0388ed1d83b 100644 --- a/receiver/vcenterreceiver/testdata/integration/expected.yaml +++ b/receiver/vcenterreceiver/testdata/integration/expected.yaml @@ -462,47 +462,6 @@ resourceMetrics: scope: name: otelcol/vcenterreceiver version: latest - - resource: - attributes: - - key: vcenter.cluster.name - value: - stringValue: DC0_C0 - - key: vcenter.datastore.name - value: - stringValue: LocalDS_0 - scopeMetrics: - - metrics: - - description: The amount of space in the datastore. - name: vcenter.datastore.disk.usage - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "42949672960" - attributes: - - key: disk_state - value: - stringValue: used - startTimeUnixNano: "1707407684042820000" - timeUnixNano: "1707407733803628000" - - asInt: "10952166604800" - attributes: - - key: disk_state - value: - stringValue: available - startTimeUnixNano: "1707407684042820000" - timeUnixNano: "1707407733803628000" - unit: By - - description: The utilization of the datastore. - gauge: - dataPoints: - - asDouble: 0.390625 - startTimeUnixNano: "1707407684042820000" - timeUnixNano: "1707407733803628000" - name: vcenter.datastore.disk.utilization - unit: "%" - scope: - name: otelcol/vcenterreceiver - version: latest - resource: attributes: - key: vcenter.vm.name diff --git a/receiver/vcenterreceiver/testdata/metrics/expected-all-enabled.yaml b/receiver/vcenterreceiver/testdata/metrics/expected-all-enabled.yaml index a9e4f8af7c96..316053940cf6 100644 --- a/receiver/vcenterreceiver/testdata/metrics/expected-all-enabled.yaml +++ b/receiver/vcenterreceiver/testdata/metrics/expected-all-enabled.yaml @@ -104,50 +104,6 @@ resourceMetrics: scope: name: otelcol/vcenterreceiver version: latest - - resource: - attributes: - - key: vcenter.datacenter.name - value: - stringValue: Datacenter - - key: vcenter.cluster.name - value: - stringValue: Cluster - - key: vcenter.datastore.name - value: - stringValue: vsanDatastore - scopeMetrics: - - metrics: - - description: The amount of space in the datastore. - name: vcenter.datastore.disk.usage - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "51693551508648" - attributes: - - key: disk_state - value: - stringValue: available - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - - asInt: "5917763748696" - attributes: - - key: disk_state - value: - stringValue: used - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: By - - description: The utilization of the datastore. 
- gauge: - dataPoints: - - asDouble: 10.271877533539964 - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: vcenter.datastore.disk.utilization - unit: '%' - scope: - name: otelcol/vcenterreceiver - version: latest - resource: attributes: - key: vcenter.datacenter.name diff --git a/receiver/vcenterreceiver/testdata/metrics/expected.yaml b/receiver/vcenterreceiver/testdata/metrics/expected.yaml index 9bd35cd10ba5..9340103f2377 100644 --- a/receiver/vcenterreceiver/testdata/metrics/expected.yaml +++ b/receiver/vcenterreceiver/testdata/metrics/expected.yaml @@ -92,47 +92,6 @@ resourceMetrics: scope: name: otelcol/vcenterreceiver version: latest - - resource: - attributes: - - key: vcenter.cluster.name - value: - stringValue: Cluster - - key: vcenter.datastore.name - value: - stringValue: vsanDatastore - scopeMetrics: - - metrics: - - description: The amount of space in the datastore. - name: vcenter.datastore.disk.usage - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "51693551508648" - attributes: - - key: disk_state - value: - stringValue: available - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - - asInt: "5917763748696" - attributes: - - key: disk_state - value: - stringValue: used - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: By - - description: The utilization of the datastore. - gauge: - dataPoints: - - asDouble: 10.271877533539964 - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: vcenter.datastore.disk.utilization - unit: '%' - scope: - name: otelcol/vcenterreceiver - version: latest - resource: attributes: - key: vcenter.datastore.name From 344990300ec413e990cdf406525523130c2ece46 Mon Sep 17 00:00:00 2001 From: Stefan Kurek Date: Wed, 8 May 2024 12:57:11 -0400 Subject: [PATCH 05/55] [receiver/vcenter] Adds New Packet Dropped Rate Metric for VMs (#32930) **Description:** Adds new default disabled (with Warning log for default enabled on next release) metric `vcenter.vm.network.packet.drop.rate` for Virtual Machines. This metric makes use of the `droppedRx` and `droppedTx` Network performance metrics detailed [here](https://vdc-repo.vmware.com/vmwb-repository/dcr-public/d1902b0e-d479-46bf-8ac9-cee0e31e8ec0/07ce8dbd-db48-4261-9b8f-c6d3ad8ba472/network_counters.html) for Virtual machines. This would use the same metric attributes as the other VM packet metrics and closely match `vcenter.vm.network.packet.rate` in every other way. **Link to tracking Issue:** #32929 **Testing:** Unit/integration tests updated and tested. Local environment tested. **Documentation:** New documentation generated based on the metadata. 
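The new metric is derived from the `net.droppedTx.summation` / `net.droppedRx.summation` performance counters, which report packet totals over vCenter's 20-second real-time sample interval; the scraper divides by that interval to produce a `{packets/sec}` rate. A minimal sketch of that conversion (standalone helper with hypothetical names, not the receiver's actual code):

```go
package main

import "fmt"

// sampleIntervalSec is vCenter's real-time performance sample interval; the
// droppedTx/droppedRx counters are summations over this 20-second window.
const sampleIntervalSec = 20.0

// packetDropRate converts a dropped-packet summation for one sample interval
// into the per-second rate recorded as vcenter.vm.network.packet.drop.rate.
func packetDropRate(droppedInInterval int64) float64 {
	return float64(droppedInInterval) / sampleIntervalSec
}

func main() {
	// 40 packets dropped on transmit over the last 20s -> 2 packets/sec,
	// matching the sample values in the updated mock server responses.
	fmt.Println(packetDropRate(40))
}
```

The data points carry the same `direction` and `object` attributes as `vcenter.vm.network.packet.rate`, as shown in the diff that follows.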
--- ...eat_vcenter-vm-add-packet-drop-metric.yaml | 27 ++ ...ix_vcenter-vm-add-new-packet-metrics.yaml} | 0 receiver/vcenterreceiver/documentation.md | 17 + .../internal/metadata/generated_config.go | 4 + .../metadata/generated_config_test.go | 2 + .../internal/metadata/generated_metrics.go | 63 +++ .../metadata/generated_metrics_test.go | 25 ++ .../internal/metadata/testdata/config.yaml | 4 + .../responses/vm-performance-counters.xml | 252 ++++++++++++ receiver/vcenterreceiver/metadata.yaml | 10 + receiver/vcenterreceiver/metrics.go | 8 + receiver/vcenterreceiver/scraper_test.go | 1 + .../metrics/expected-all-enabled.yaml | 375 ++++++++++++++++++ 13 files changed, 788 insertions(+) create mode 100644 .chloggen/feat_vcenter-vm-add-packet-drop-metric.yaml rename .chloggen/{fix_vcenter-vm-add-disk-metric copy.yaml => fix_vcenter-vm-add-new-packet-metrics.yaml} (100%) diff --git a/.chloggen/feat_vcenter-vm-add-packet-drop-metric.yaml b/.chloggen/feat_vcenter-vm-add-packet-drop-metric.yaml new file mode 100644 index 000000000000..bcc960441c35 --- /dev/null +++ b/.chloggen/feat_vcenter-vm-add-packet-drop-metric.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: bug_fix + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: vcenterreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: "Adds inititially disabled packet drop rate metric for VMs." + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [32929] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/.chloggen/fix_vcenter-vm-add-disk-metric copy.yaml b/.chloggen/fix_vcenter-vm-add-new-packet-metrics.yaml similarity index 100% rename from .chloggen/fix_vcenter-vm-add-disk-metric copy.yaml rename to .chloggen/fix_vcenter-vm-add-new-packet-metrics.yaml diff --git a/receiver/vcenterreceiver/documentation.md b/receiver/vcenterreceiver/documentation.md index 39e17eb3c30c..ac7adf3f02b2 100644 --- a/receiver/vcenterreceiver/documentation.md +++ b/receiver/vcenterreceiver/documentation.md @@ -508,6 +508,23 @@ The memory utilization of the VM. | ---- | ----------- | ---------- | | % | Gauge | Double | +### vcenter.vm.network.packet.drop.rate + +The rate of transmitted or received packets dropped by each vNIC (virtual network interface controller) on the virtual machine. + +As measured over the most recent 20s interval. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {packets/sec} | Gauge | Double | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| direction | The direction of network throughput. 
| Str: ``transmitted``, ``received`` | +| object | The object on the virtual machine or host that is being reported on. | Any Str | + ### vcenter.vm.network.packet.rate The rate of packets transmitted or received by each vNIC (virtual network interface controller) on the virtual machine. diff --git a/receiver/vcenterreceiver/internal/metadata/generated_config.go b/receiver/vcenterreceiver/internal/metadata/generated_config.go index 75de09a40b35..954b1ffa1748 100644 --- a/receiver/vcenterreceiver/internal/metadata/generated_config.go +++ b/receiver/vcenterreceiver/internal/metadata/generated_config.go @@ -68,6 +68,7 @@ type MetricsConfig struct { VcenterVMMemoryUsage MetricConfig `mapstructure:"vcenter.vm.memory.usage"` VcenterVMMemoryUtilization MetricConfig `mapstructure:"vcenter.vm.memory.utilization"` VcenterVMNetworkPacketCount MetricConfig `mapstructure:"vcenter.vm.network.packet.count"` + VcenterVMNetworkPacketDropRate MetricConfig `mapstructure:"vcenter.vm.network.packet.drop.rate"` VcenterVMNetworkPacketRate MetricConfig `mapstructure:"vcenter.vm.network.packet.rate"` VcenterVMNetworkThroughput MetricConfig `mapstructure:"vcenter.vm.network.throughput"` VcenterVMNetworkUsage MetricConfig `mapstructure:"vcenter.vm.network.usage"` @@ -195,6 +196,9 @@ func DefaultMetricsConfig() MetricsConfig { VcenterVMNetworkPacketCount: MetricConfig{ Enabled: true, }, + VcenterVMNetworkPacketDropRate: MetricConfig{ + Enabled: false, + }, VcenterVMNetworkPacketRate: MetricConfig{ Enabled: false, }, diff --git a/receiver/vcenterreceiver/internal/metadata/generated_config_test.go b/receiver/vcenterreceiver/internal/metadata/generated_config_test.go index 217d33534c6f..a3f28c5d3dd7 100644 --- a/receiver/vcenterreceiver/internal/metadata/generated_config_test.go +++ b/receiver/vcenterreceiver/internal/metadata/generated_config_test.go @@ -66,6 +66,7 @@ func TestMetricsBuilderConfig(t *testing.T) { VcenterVMMemoryUsage: MetricConfig{Enabled: true}, VcenterVMMemoryUtilization: MetricConfig{Enabled: true}, VcenterVMNetworkPacketCount: MetricConfig{Enabled: true}, + VcenterVMNetworkPacketDropRate: MetricConfig{Enabled: true}, VcenterVMNetworkPacketRate: MetricConfig{Enabled: true}, VcenterVMNetworkThroughput: MetricConfig{Enabled: true}, VcenterVMNetworkUsage: MetricConfig{Enabled: true}, @@ -130,6 +131,7 @@ func TestMetricsBuilderConfig(t *testing.T) { VcenterVMMemoryUsage: MetricConfig{Enabled: false}, VcenterVMMemoryUtilization: MetricConfig{Enabled: false}, VcenterVMNetworkPacketCount: MetricConfig{Enabled: false}, + VcenterVMNetworkPacketDropRate: MetricConfig{Enabled: false}, VcenterVMNetworkPacketRate: MetricConfig{Enabled: false}, VcenterVMNetworkThroughput: MetricConfig{Enabled: false}, VcenterVMNetworkUsage: MetricConfig{Enabled: false}, diff --git a/receiver/vcenterreceiver/internal/metadata/generated_metrics.go b/receiver/vcenterreceiver/internal/metadata/generated_metrics.go index 0198c24e9598..efe7064ca54c 100644 --- a/receiver/vcenterreceiver/internal/metadata/generated_metrics.go +++ b/receiver/vcenterreceiver/internal/metadata/generated_metrics.go @@ -2205,6 +2205,58 @@ func newMetricVcenterVMNetworkPacketCount(cfg MetricConfig) metricVcenterVMNetwo return m } +type metricVcenterVMNetworkPacketDropRate struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills vcenter.vm.network.packet.drop.rate metric with initial data. 
+func (m *metricVcenterVMNetworkPacketDropRate) init() { + m.data.SetName("vcenter.vm.network.packet.drop.rate") + m.data.SetDescription("The rate of transmitted or received packets dropped by each vNIC (virtual network interface controller) on the virtual machine.") + m.data.SetUnit("{packets/sec}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricVcenterVMNetworkPacketDropRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, throughputDirectionAttributeValue string, objectNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("direction", throughputDirectionAttributeValue) + dp.Attributes().PutStr("object", objectNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricVcenterVMNetworkPacketDropRate) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricVcenterVMNetworkPacketDropRate) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricVcenterVMNetworkPacketDropRate(cfg MetricConfig) metricVcenterVMNetworkPacketDropRate { + m := metricVcenterVMNetworkPacketDropRate{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricVcenterVMNetworkPacketRate struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. 
@@ -2414,6 +2466,7 @@ type MetricsBuilder struct { metricVcenterVMMemoryUsage metricVcenterVMMemoryUsage metricVcenterVMMemoryUtilization metricVcenterVMMemoryUtilization metricVcenterVMNetworkPacketCount metricVcenterVMNetworkPacketCount + metricVcenterVMNetworkPacketDropRate metricVcenterVMNetworkPacketDropRate metricVcenterVMNetworkPacketRate metricVcenterVMNetworkPacketRate metricVcenterVMNetworkThroughput metricVcenterVMNetworkThroughput metricVcenterVMNetworkUsage metricVcenterVMNetworkUsage @@ -2451,6 +2504,9 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSetting if mbc.Metrics.VcenterVMNetworkPacketCount.enabledSetByUser { settings.Logger.Warn("[WARNING] `vcenter.vm.network.packet.count` should not be configured: this metric is replaced by [vcenter.vm.network.packet.rate] & will be removed starting in release v0.102.0") } + if !mbc.Metrics.VcenterVMNetworkPacketDropRate.enabledSetByUser { + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `vcenter.vm.network.packet.drop.rate`: this metric will be enabled by default starting in release v0.102.0") + } if !mbc.Metrics.VcenterVMNetworkPacketRate.enabledSetByUser { settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `vcenter.vm.network.packet.rate`: this metric will be enabled by default starting in release v0.102.0") } @@ -2514,6 +2570,7 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSetting metricVcenterVMMemoryUsage: newMetricVcenterVMMemoryUsage(mbc.Metrics.VcenterVMMemoryUsage), metricVcenterVMMemoryUtilization: newMetricVcenterVMMemoryUtilization(mbc.Metrics.VcenterVMMemoryUtilization), metricVcenterVMNetworkPacketCount: newMetricVcenterVMNetworkPacketCount(mbc.Metrics.VcenterVMNetworkPacketCount), + metricVcenterVMNetworkPacketDropRate: newMetricVcenterVMNetworkPacketDropRate(mbc.Metrics.VcenterVMNetworkPacketDropRate), metricVcenterVMNetworkPacketRate: newMetricVcenterVMNetworkPacketRate(mbc.Metrics.VcenterVMNetworkPacketRate), metricVcenterVMNetworkThroughput: newMetricVcenterVMNetworkThroughput(mbc.Metrics.VcenterVMNetworkThroughput), metricVcenterVMNetworkUsage: newMetricVcenterVMNetworkUsage(mbc.Metrics.VcenterVMNetworkUsage), @@ -2693,6 +2750,7 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { mb.metricVcenterVMMemoryUsage.emit(ils.Metrics()) mb.metricVcenterVMMemoryUtilization.emit(ils.Metrics()) mb.metricVcenterVMNetworkPacketCount.emit(ils.Metrics()) + mb.metricVcenterVMNetworkPacketDropRate.emit(ils.Metrics()) mb.metricVcenterVMNetworkPacketRate.emit(ils.Metrics()) mb.metricVcenterVMNetworkThroughput.emit(ils.Metrics()) mb.metricVcenterVMNetworkUsage.emit(ils.Metrics()) @@ -2927,6 +2985,11 @@ func (mb *MetricsBuilder) RecordVcenterVMNetworkPacketCountDataPoint(ts pcommon. mb.metricVcenterVMNetworkPacketCount.recordDataPoint(mb.startTime, ts, val, throughputDirectionAttributeValue.String(), objectNameAttributeValue) } +// RecordVcenterVMNetworkPacketDropRateDataPoint adds a data point to vcenter.vm.network.packet.drop.rate metric. +func (mb *MetricsBuilder) RecordVcenterVMNetworkPacketDropRateDataPoint(ts pcommon.Timestamp, val float64, throughputDirectionAttributeValue AttributeThroughputDirection, objectNameAttributeValue string) { + mb.metricVcenterVMNetworkPacketDropRate.recordDataPoint(mb.startTime, ts, val, throughputDirectionAttributeValue.String(), objectNameAttributeValue) +} + // RecordVcenterVMNetworkPacketRateDataPoint adds a data point to vcenter.vm.network.packet.rate metric. 
func (mb *MetricsBuilder) RecordVcenterVMNetworkPacketRateDataPoint(ts pcommon.Timestamp, val float64, throughputDirectionAttributeValue AttributeThroughputDirection, objectNameAttributeValue string) { mb.metricVcenterVMNetworkPacketRate.recordDataPoint(mb.startTime, ts, val, throughputDirectionAttributeValue.String(), objectNameAttributeValue) diff --git a/receiver/vcenterreceiver/internal/metadata/generated_metrics_test.go b/receiver/vcenterreceiver/internal/metadata/generated_metrics_test.go index 17c03a8be2c5..0285523faed5 100644 --- a/receiver/vcenterreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/vcenterreceiver/internal/metadata/generated_metrics_test.go @@ -90,6 +90,10 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, "[WARNING] `vcenter.vm.network.packet.count` should not be configured: this metric is replaced by [vcenter.vm.network.packet.rate] & will be removed starting in release v0.102.0", observedLogs.All()[expectedWarnings].Message) expectedWarnings++ } + if test.metricsSet == testDataSetDefault { + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `vcenter.vm.network.packet.drop.rate`: this metric will be enabled by default starting in release v0.102.0", observedLogs.All()[expectedWarnings].Message) + expectedWarnings++ + } if test.metricsSet == testDataSetDefault { assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `vcenter.vm.network.packet.rate`: this metric will be enabled by default starting in release v0.102.0", observedLogs.All()[expectedWarnings].Message) expectedWarnings++ @@ -276,6 +280,9 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordVcenterVMNetworkPacketCountDataPoint(ts, 1, AttributeThroughputDirectionTransmitted, "object_name-val") + allMetricsCount++ + mb.RecordVcenterVMNetworkPacketDropRateDataPoint(ts, 1, AttributeThroughputDirectionTransmitted, "object_name-val") + allMetricsCount++ mb.RecordVcenterVMNetworkPacketRateDataPoint(ts, 1, AttributeThroughputDirectionTransmitted, "object_name-val") @@ -940,6 +947,24 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok = dp.Attributes().Get("object") assert.True(t, ok) assert.EqualValues(t, "object_name-val", attrVal.Str()) + case "vcenter.vm.network.packet.drop.rate": + assert.False(t, validatedMetrics["vcenter.vm.network.packet.drop.rate"], "Found a duplicate in the metrics slice: vcenter.vm.network.packet.drop.rate") + validatedMetrics["vcenter.vm.network.packet.drop.rate"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The rate of transmitted or received packets dropped by each vNIC (virtual network interface controller) on the virtual machine.", ms.At(i).Description()) + assert.Equal(t, "{packets/sec}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("direction") + assert.True(t, ok) + assert.EqualValues(t, "transmitted", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("object") + assert.True(t, ok) + assert.EqualValues(t, "object_name-val", attrVal.Str()) case "vcenter.vm.network.packet.rate": assert.False(t, validatedMetrics["vcenter.vm.network.packet.rate"], "Found a duplicate in the metrics slice: vcenter.vm.network.packet.rate") 
validatedMetrics["vcenter.vm.network.packet.rate"] = true diff --git a/receiver/vcenterreceiver/internal/metadata/testdata/config.yaml b/receiver/vcenterreceiver/internal/metadata/testdata/config.yaml index 2f1281bc668f..0e9970eadf8e 100644 --- a/receiver/vcenterreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/vcenterreceiver/internal/metadata/testdata/config.yaml @@ -81,6 +81,8 @@ all_set: enabled: true vcenter.vm.network.packet.count: enabled: true + vcenter.vm.network.packet.drop.rate: + enabled: true vcenter.vm.network.packet.rate: enabled: true vcenter.vm.network.throughput: @@ -194,6 +196,8 @@ none_set: enabled: false vcenter.vm.network.packet.count: enabled: false + vcenter.vm.network.packet.drop.rate: + enabled: false vcenter.vm.network.packet.rate: enabled: false vcenter.vm.network.throughput: diff --git a/receiver/vcenterreceiver/internal/mockserver/responses/vm-performance-counters.xml b/receiver/vcenterreceiver/internal/mockserver/responses/vm-performance-counters.xml index 005407144536..59f89bf27dab 100644 --- a/receiver/vcenterreceiver/internal/mockserver/responses/vm-performance-counters.xml +++ b/receiver/vcenterreceiver/internal/mockserver/responses/vm-performance-counters.xml @@ -64,6 +64,20 @@ 0 + + + 530 + + + 40 + + + + 529 + + + 20 + 143 @@ -85,6 +99,13 @@ 0 + + + 530 + vmnic3 + + 40 + 531 @@ -99,6 +120,13 @@ 0 + + + 530 + vmnic2 + + 40 + 143 @@ -148,6 +176,20 @@ 0 + + + 530 + vmnic1 + + 40 + + + + 529 + vmnic1 + + 20 + 143 @@ -169,6 +211,20 @@ 0 + + + 530 + vmnic0 + + 40 + + + + 529 + vmnic0 + + 20 + 143 @@ -183,6 +239,13 @@ 0 + + + 529 + vmnic3 + + 20 + 146 @@ -190,6 +253,13 @@ 0 + + + 529 + vmnic2 + + 20 + 532 @@ -246,6 +316,13 @@ 0 + + + 529 + 4000 + + 20 + 147 @@ -253,6 +330,13 @@ 0 + + + 530 + 4000 + + 40 + 532 @@ -330,6 +414,20 @@ 0 + + + 530 + + + 40 + + + + 529 + + + 20 + 143 @@ -351,6 +449,13 @@ 0 + + + 530 + vmnic3 + + 40 + 531 @@ -365,6 +470,13 @@ 0 + + + 530 + vmnic2 + + 40 + 143 @@ -414,6 +526,20 @@ 0 + + + 530 + vmnic1 + + 40 + + + + 529 + vmnic1 + + 20 + 143 @@ -435,6 +561,20 @@ 0 + + + 530 + vmnic0 + + 40 + + + + 529 + vmnic0 + + 20 + 143 @@ -449,6 +589,13 @@ 0 + + + 529 + vmnic3 + + 20 + 146 @@ -456,6 +603,13 @@ 0 + + + 529 + vmnic2 + + 20 + 532 @@ -519,6 +673,20 @@ 0 + + + 530 + 4000 + + 40 + + + + 529 + 4000 + + 20 + 532 @@ -596,6 +764,20 @@ 0 + + + 530 + + + 40 + + + + 529 + + + 20 + 143 @@ -617,6 +799,13 @@ 0 + + + 530 + vmnic3 + + 40 + 531 @@ -631,6 +820,13 @@ 0 + + + 530 + vmnic2 + + 40 + 143 @@ -680,6 +876,20 @@ 0 + + + 530 + vmnic1 + + 40 + + + + 529 + vmnic1 + + 20 + 143 @@ -701,6 +911,20 @@ 0 + + + 530 + vmnic0 + + 40 + + + + 529 + vmnic0 + + 20 + 143 @@ -715,6 +939,13 @@ 0 + + + 529 + vmnic3 + + 20 + 146 @@ -722,6 +953,13 @@ 0 + + + 529 + vmnic2 + + 20 + 532 @@ -785,6 +1023,20 @@ 0 + + + 529 + 4000 + + 20 + + + + 530 + 4000 + + 40 + 532 diff --git a/receiver/vcenterreceiver/metadata.yaml b/receiver/vcenterreceiver/metadata.yaml index c97dc1a0bd12..4c82e086b6bb 100644 --- a/receiver/vcenterreceiver/metadata.yaml +++ b/receiver/vcenterreceiver/metadata.yaml @@ -470,6 +470,16 @@ metrics: extended_documentation: As measured over the most recent 20s interval. warnings: if_enabled_not_set: "this metric will be enabled by default starting in release v0.102.0" + vcenter.vm.network.packet.drop.rate: + enabled: false + description: The rate of transmitted or received packets dropped by each vNIC (virtual network interface controller) on the virtual machine. 
+ unit: "{packets/sec}" + gauge: + value_type: double + attributes: [throughput_direction, object_name] + extended_documentation: As measured over the most recent 20s interval. + warnings: + if_enabled_not_set: "this metric will be enabled by default starting in release v0.102.0" vcenter.vm.network.usage: enabled: true description: The network utilization combined transmit and receive rates during an interval. diff --git a/receiver/vcenterreceiver/metrics.go b/receiver/vcenterreceiver/metrics.go index e1dd574d3a31..6114206f0b2f 100644 --- a/receiver/vcenterreceiver/metrics.go +++ b/receiver/vcenterreceiver/metrics.go @@ -166,6 +166,8 @@ var vmPerfMetricList = []string{ // network metrics "net.packetsTx.summation", "net.packetsRx.summation", + "net.droppedTx.summation", + "net.droppedRx.summation", "net.bytesRx.average", "net.bytesTx.average", "net.usage.average", @@ -216,6 +218,12 @@ func (v *vcenterMetricScraper) recordVMPerformanceMetrics(entityMetric *performa v.mb.RecordVcenterVMDiskThroughputDataPoint(pcommon.NewTimestampFromTime(si.Timestamp), nestedValue, metadata.AttributeDiskDirectionRead, val.Instance) case "virtualDisk.write.average": v.mb.RecordVcenterVMDiskThroughputDataPoint(pcommon.NewTimestampFromTime(si.Timestamp), nestedValue, metadata.AttributeDiskDirectionWrite, val.Instance) + case "net.droppedTx.summation": + txRate := float64(nestedValue) / 20 + v.mb.RecordVcenterVMNetworkPacketDropRateDataPoint(pcommon.NewTimestampFromTime(si.Timestamp), txRate, metadata.AttributeThroughputDirectionTransmitted, val.Instance) + case "net.droppedRx.summation": + rxRate := float64(nestedValue) / 20 + v.mb.RecordVcenterVMNetworkPacketDropRateDataPoint(pcommon.NewTimestampFromTime(si.Timestamp), rxRate, metadata.AttributeThroughputDirectionReceived, val.Instance) } } } diff --git a/receiver/vcenterreceiver/scraper_test.go b/receiver/vcenterreceiver/scraper_test.go index fd84ca3cc96e..fefeb86ce2cc 100644 --- a/receiver/vcenterreceiver/scraper_test.go +++ b/receiver/vcenterreceiver/scraper_test.go @@ -49,6 +49,7 @@ func TestScrapeConfigsEnabled(t *testing.T) { optConfigs.Metrics.VcenterHostNetworkPacketErrorRate.Enabled = true optConfigs.Metrics.VcenterHostNetworkPacketRate.Enabled = true optConfigs.Metrics.VcenterVMNetworkPacketRate.Enabled = true + optConfigs.Metrics.VcenterVMNetworkPacketDropRate.Enabled = true cfg := &Config{ MetricsBuilderConfig: optConfigs, diff --git a/receiver/vcenterreceiver/testdata/metrics/expected-all-enabled.yaml b/receiver/vcenterreceiver/testdata/metrics/expected-all-enabled.yaml index 316053940cf6..13610089822e 100644 --- a/receiver/vcenterreceiver/testdata/metrics/expected-all-enabled.yaml +++ b/receiver/vcenterreceiver/testdata/metrics/expected-all-enabled.yaml @@ -7500,6 +7500,131 @@ resourceMetrics: startTimeUnixNano: "2000000" timeUnixNano: "1000000" unit: '{packets/sec}' + - description: The rate of transmitted or received packets dropped by each vNIC (virtual network interface controller) on the virtual machine. 
+ name: vcenter.vm.network.packet.drop.rate + gauge: + dataPoints: + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + unit: '{packets/sec}' - description: The rate of packets transmitted or received by each vNIC (virtual network interface controller) on the virtual machine. name: vcenter.vm.network.packet.rate gauge: @@ -8172,6 +8297,131 @@ resourceMetrics: startTimeUnixNano: "2000000" timeUnixNano: "1000000" unit: '{packets/sec}' + - description: The rate of transmitted or received packets dropped by each vNIC (virtual network interface controller) on the virtual machine. 
+ name: vcenter.vm.network.packet.drop.rate + gauge: + dataPoints: + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + unit: '{packets/sec}' - description: The rate of packets transmitted or received by each vNIC (virtual network interface controller) on the virtual machine. name: vcenter.vm.network.packet.rate gauge: @@ -8799,6 +9049,131 @@ resourceMetrics: startTimeUnixNano: "2000000" timeUnixNano: "1000000" unit: '{packets/sec}' + - description: The rate of transmitted or received packets dropped by each vNIC (virtual network interface controller) on the virtual machine. 
+ name: vcenter.vm.network.packet.drop.rate + gauge: + dataPoints: + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + unit: '{packets/sec}' - description: The rate of packets transmitted or received by each vNIC (virtual network interface controller) on the virtual machine. 
name: vcenter.vm.network.packet.rate gauge: From f4a3147bc5006e01ea6c7b57f6980ce4864cdb79 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 8 May 2024 10:19:15 -0700 Subject: [PATCH 06/55] fix(deps): update module github.com/open-telemetry/otel-arrow to v0.22.0 (#32105) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [![Mend Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | [github.com/open-telemetry/otel-arrow](https://togithub.com/open-telemetry/otel-arrow) | `v0.18.0` -> `v0.22.0` | [![age](https://developer.mend.io/api/mc/badges/age/go/github.com%2fopen-telemetry%2fotel-arrow/v0.22.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/go/github.com%2fopen-telemetry%2fotel-arrow/v0.22.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/go/github.com%2fopen-telemetry%2fotel-arrow/v0.18.0/v0.22.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/go/github.com%2fopen-telemetry%2fotel-arrow/v0.18.0/v0.22.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | --- > [!WARNING] > Some dependencies could not be looked up. Check the Dependency Dashboard for more information. --- ### Release Notes
open-telemetry/otel-arrow (github.com/open-telemetry/otel-arrow) ### [`v0.22.0`](https://togithub.com/open-telemetry/otel-arrow/releases/tag/v0.22.0) [Compare Source](https://togithub.com/open-telemetry/otel-arrow/compare/v0.21.0...v0.22.0) Includes [#​178](https://togithub.com/open-telemetry/otel-arrow/issues/178). [CHANGELOG](https://togithub.com/open-telemetry/otel-arrow/blob/main/CHANGELOG.md) ### [`v0.21.0`](https://togithub.com/open-telemetry/otel-arrow/releases/tag/v0.21.0) [Compare Source](https://togithub.com/open-telemetry/otel-arrow/compare/v0.20.0...v0.21.0) See the [CHANGELOG](https://togithub.com/open-telemetry/otel-arrow/blob/main/CHANGELOG.md). ### [`v0.20.0`](https://togithub.com/open-telemetry/otel-arrow/releases/tag/v0.20.0) [Compare Source](https://togithub.com/open-telemetry/otel-arrow/compare/v0.19.0...v0.20.0) ##### What's Changed - Backport lint fixes from OTel-Collector-Contrib PR 31996 by [@​jmacd](https://togithub.com/jmacd) in [https://github.com/open-telemetry/otel-arrow/pull/163](https://togithub.com/open-telemetry/otel-arrow/pull/163) - Upgrade collector to v0.97.0 by [@​moh-osman3](https://togithub.com/moh-osman3) in [https://github.com/open-telemetry/otel-arrow/pull/164](https://togithub.com/open-telemetry/otel-arrow/pull/164) **Full Changelog**: https://github.com/open-telemetry/otel-arrow/compare/v0.19.0...v0.20.0 ### [`v0.19.0`](https://togithub.com/open-telemetry/otel-arrow/releases/tag/v0.19.0) [Compare Source](https://togithub.com/open-telemetry/otel-arrow/compare/v0.18.0...v0.19.0) See [CHANGELOG.md](https://togithub.com/open-telemetry/otel-arrow/blob/main/CHANGELOG.md#0190---2024-03-26) for release notes.
--- ### Configuration 📅 **Schedule**: Branch creation - "on tuesday" (UTC), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Mend Renovate](https://www.mend.io/free-developer-tools/renovate/). View repository job log [here](https://developer.mend.io/github/open-telemetry/opentelemetry-collector-contrib). --------- Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: opentelemetrybot <107717825+opentelemetrybot@users.noreply.github.com> --- exporter/otelarrowexporter/go.mod | 6 +++--- exporter/otelarrowexporter/go.sum | 20 ++++++++++---------- receiver/otelarrowreceiver/go.mod | 6 +++--- receiver/otelarrowreceiver/go.sum | 20 ++++++++++---------- 4 files changed, 26 insertions(+), 26 deletions(-) diff --git a/exporter/otelarrowexporter/go.mod b/exporter/otelarrowexporter/go.mod index 3cc88aad09bf..ddac000a2253 100644 --- a/exporter/otelarrowexporter/go.mod +++ b/exporter/otelarrowexporter/go.mod @@ -3,7 +3,7 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/exporter/otelar go 1.21.0 require ( - github.com/open-telemetry/otel-arrow v0.18.0 + github.com/open-telemetry/otel-arrow v0.22.0 github.com/open-telemetry/otel-arrow/collector v0.22.0 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.100.0 @@ -70,11 +70,11 @@ require ( go.opentelemetry.io/otel/sdk/metric v1.26.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/mod v0.13.0 // indirect + golang.org/x/mod v0.14.0 // indirect golang.org/x/net v0.24.0 // indirect golang.org/x/sys v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect - golang.org/x/tools v0.14.0 // indirect + golang.org/x/tools v0.15.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect google.golang.org/protobuf v1.34.0 // indirect diff --git a/exporter/otelarrowexporter/go.sum b/exporter/otelarrowexporter/go.sum index 4a89abe4897b..db5468f4d691 100644 --- a/exporter/otelarrowexporter/go.sum +++ b/exporter/otelarrowexporter/go.sum @@ -62,8 +62,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mostynb/go-grpc-compression v1.2.2 h1:XaDbnRvt2+1vgr0b/l0qh4mJAfIxE0bKXtz2Znl3GGI= github.com/mostynb/go-grpc-compression v1.2.2/go.mod h1:GOCr2KBxXcblCuczg3YdLQlcin1/NfyDA348ckuCH6w= -github.com/open-telemetry/otel-arrow v0.18.0 h1:v3KH1HIpdXRy+V5awAmn2M+uthbE52Qi7svBYSweASI= -github.com/open-telemetry/otel-arrow v0.18.0/go.mod h1:054cuTUlLVHH6Y//65bEPeMiHjYRs7DiX/el+yQbgYg= +github.com/open-telemetry/otel-arrow v0.22.0 h1:G1jgtqAM2ho5pyKQ4tyrDzk9Y0VcJ+GZQRJgN26vRlI= +github.com/open-telemetry/otel-arrow v0.22.0/go.mod h1:F50XFaiNfkfB0MYftZIUKFULm6pxfGqjbgQzevi+65M= github.com/open-telemetry/otel-arrow/collector v0.22.0 h1:lHFjzkh5PbsiW8B63SRntnP9W7bLCXV9lslO4zI0s/Y= github.com/open-telemetry/otel-arrow/collector v0.22.0/go.mod h1:R7hRwuGDxoGLB27dkJUFKDK7mGG7Yb02ODnLHx8Whis= github.com/pierrec/lz4 v2.0.5+incompatible 
h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= @@ -154,12 +154,12 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= +golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= +golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= -golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -185,16 +185,16 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= -golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/tools v0.15.0 h1:zdAyfUGbYmuVokhzVmghFl2ZJh5QhcfebBgmVPFYA+8= +golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= -gonum.org/v1/gonum v0.14.0 h1:2NiG67LD1tEH0D7kM+ps2V+fXmsAnpUeec7n8tcr4S0= -gonum.org/v1/gonum v0.14.0/go.mod h1:AoWeoz0becf9QMWtE8iWXNXc27fK4fNeHNf/oMejGfU= +gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ= +gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo= google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4= 
google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= diff --git a/receiver/otelarrowreceiver/go.mod b/receiver/otelarrowreceiver/go.mod index 746856096e50..e514c14a26e3 100644 --- a/receiver/otelarrowreceiver/go.mod +++ b/receiver/otelarrowreceiver/go.mod @@ -3,7 +3,7 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/receiver/otelar go 1.21.0 require ( - github.com/open-telemetry/otel-arrow v0.18.0 + github.com/open-telemetry/otel-arrow v0.22.0 github.com/open-telemetry/otel-arrow/collector v0.22.0 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector v0.100.0 @@ -77,10 +77,10 @@ require ( go.opentelemetry.io/otel/exporters/prometheus v0.48.0 // indirect go.opentelemetry.io/otel/sdk v1.26.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.26.0 // indirect - golang.org/x/mod v0.13.0 // indirect + golang.org/x/mod v0.14.0 // indirect golang.org/x/sys v0.20.0 // indirect golang.org/x/text v0.15.0 // indirect - golang.org/x/tools v0.14.0 // indirect + golang.org/x/tools v0.15.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect google.golang.org/protobuf v1.34.0 // indirect diff --git a/receiver/otelarrowreceiver/go.sum b/receiver/otelarrowreceiver/go.sum index 6e6f1eadccc1..009fecf9b730 100644 --- a/receiver/otelarrowreceiver/go.sum +++ b/receiver/otelarrowreceiver/go.sum @@ -82,8 +82,8 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/mostynb/go-grpc-compression v1.2.2 h1:XaDbnRvt2+1vgr0b/l0qh4mJAfIxE0bKXtz2Znl3GGI= github.com/mostynb/go-grpc-compression v1.2.2/go.mod h1:GOCr2KBxXcblCuczg3YdLQlcin1/NfyDA348ckuCH6w= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/open-telemetry/otel-arrow v0.18.0 h1:v3KH1HIpdXRy+V5awAmn2M+uthbE52Qi7svBYSweASI= -github.com/open-telemetry/otel-arrow v0.18.0/go.mod h1:054cuTUlLVHH6Y//65bEPeMiHjYRs7DiX/el+yQbgYg= +github.com/open-telemetry/otel-arrow v0.22.0 h1:G1jgtqAM2ho5pyKQ4tyrDzk9Y0VcJ+GZQRJgN26vRlI= +github.com/open-telemetry/otel-arrow v0.22.0/go.mod h1:F50XFaiNfkfB0MYftZIUKFULm6pxfGqjbgQzevi+65M= github.com/open-telemetry/otel-arrow/collector v0.22.0 h1:lHFjzkh5PbsiW8B63SRntnP9W7bLCXV9lslO4zI0s/Y= github.com/open-telemetry/otel-arrow/collector v0.22.0/go.mod h1:R7hRwuGDxoGLB27dkJUFKDK7mGG7Yb02ODnLHx8Whis= github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= @@ -182,8 +182,8 @@ golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= +golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= +golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= golang.org/x/image 
v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -191,8 +191,8 @@ golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCc golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= -golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -222,8 +222,8 @@ golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= -golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/tools v0.15.0 h1:zdAyfUGbYmuVokhzVmghFl2ZJh5QhcfebBgmVPFYA+8= +golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -232,8 +232,8 @@ golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSm golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= -gonum.org/v1/gonum v0.14.0 h1:2NiG67LD1tEH0D7kM+ps2V+fXmsAnpUeec7n8tcr4S0= -gonum.org/v1/gonum v0.14.0/go.mod h1:AoWeoz0becf9QMWtE8iWXNXc27fK4fNeHNf/oMejGfU= +gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ= +gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4= From 497fed777540f5a627445804afacbe3b4ee33ba0 Mon Sep 17 00:00:00 2001 From: lizeyuan Date: Thu, 9 May 2024 01:20:05 +0800 Subject: 
[PATCH 07/55] [remotetapprocessor] use 'time/rate' to limit traffic (#32481) bug: The remotetapprocessor `limit` configure doesn't work. how to fix: use `time/rate` to limit traffic. Resolves https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/32385 --------- Co-authored-by: Andrzej Stencel --- .chloggen/fix-remotetap-limit.yaml | 30 ++++ processor/remotetapprocessor/processor.go | 42 +++-- .../remotetapprocessor/processor_test.go | 163 ++++++++++++++++++ processor/remotetapprocessor/server_test.go | 3 + 4 files changed, 223 insertions(+), 15 deletions(-) create mode 100644 .chloggen/fix-remotetap-limit.yaml create mode 100644 processor/remotetapprocessor/processor_test.go diff --git a/.chloggen/fix-remotetap-limit.yaml b/.chloggen/fix-remotetap-limit.yaml new file mode 100644 index 000000000000..23b154e3b6ca --- /dev/null +++ b/.chloggen/fix-remotetap-limit.yaml @@ -0,0 +1,30 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: breaking + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: remotetapprocessor + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Make the `limit` configuration work properly. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [32385] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: | + The `limit` configuration was ignored previously, but now it works according to the configuration and documentation. + Nothing is required of users. + See the remotetapprocessor's `README.md` for details. + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
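The fix described above gates each broadcast behind a token bucket from `golang.org/x/time/rate`: the processor change further down constructs `rate.NewLimiter(config.Limit, int(config.Limit))` and consults `Allow()` before marshaling and writing to the websocket channels. A minimal, self-contained sketch of that pattern (the loop, counter, and printed numbers are illustrative, not taken from the processor):

```go
package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	// Mirror the processor's construction: a per-second rate equal to `limit`
	// with a burst capacity of the same size.
	limit := rate.Limit(10)
	limiter := rate.NewLimiter(limit, int(limit))

	forwarded := 0
	for i := 0; i < 20; i++ {
		// Allow is non-blocking: it reports whether a token is available right now.
		if limiter.Allow() {
			forwarded++ // in the processor this is where the batch is marshaled and broadcast
		}
	}
	// A burst of 20 immediate calls drains the 10-token bucket, so this
	// typically prints 10; the calls over the limit are simply skipped.
	fmt.Println("forwarded:", forwarded)
}
```

Because `Allow()` never blocks and the `Consume*` functions still return the incoming data unchanged, telemetry above the limit keeps flowing to the next consumer; only the websocket broadcast for that batch is dropped. That is also why the tests below expect `receiveNum` to equal the configured limit after sending twice that many batches in a single burst.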
+# Default: '[user]' +change_logs: [] diff --git a/processor/remotetapprocessor/processor.go b/processor/remotetapprocessor/processor.go index 23de9beeebd0..ea7d89c8ab40 100644 --- a/processor/remotetapprocessor/processor.go +++ b/processor/remotetapprocessor/processor.go @@ -19,6 +19,7 @@ import ( "go.opentelemetry.io/collector/processor" "go.uber.org/zap" "golang.org/x/net/websocket" + "golang.org/x/time/rate" ) type wsprocessor struct { @@ -27,6 +28,7 @@ type wsprocessor struct { server *http.Server shutdownWG sync.WaitGroup cs *channelSet + limiter *rate.Limiter } var logMarshaler = &plog.JSONMarshaler{} @@ -38,6 +40,7 @@ func newProcessor(settings processor.CreateSettings, config *Config) *wsprocesso config: config, telemetrySettings: settings.TelemetrySettings, cs: newChannelSet(), + limiter: rate.NewLimiter(config.Limit, int(config.Limit)), } } @@ -98,31 +101,40 @@ func (w *wsprocessor) Shutdown(ctx context.Context) error { } func (w *wsprocessor) ConsumeMetrics(_ context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { - b, err := metricMarshaler.MarshalMetrics(md) - if err != nil { - w.telemetrySettings.Logger.Debug("Error serializing to JSON", zap.Error(err)) - } else { - w.cs.writeBytes(b) + if w.limiter.Allow() { + b, err := metricMarshaler.MarshalMetrics(md) + if err != nil { + w.telemetrySettings.Logger.Debug("Error serializing to JSON", zap.Error(err)) + } else { + w.cs.writeBytes(b) + } } + return md, nil } func (w *wsprocessor) ConsumeLogs(_ context.Context, ld plog.Logs) (plog.Logs, error) { - b, err := logMarshaler.MarshalLogs(ld) - if err != nil { - w.telemetrySettings.Logger.Debug("Error serializing to JSON", zap.Error(err)) - } else { - w.cs.writeBytes(b) + if w.limiter.Allow() { + b, err := logMarshaler.MarshalLogs(ld) + if err != nil { + w.telemetrySettings.Logger.Debug("Error serializing to JSON", zap.Error(err)) + } else { + w.cs.writeBytes(b) + } } + return ld, nil } func (w *wsprocessor) ConsumeTraces(_ context.Context, td ptrace.Traces) (ptrace.Traces, error) { - b, err := traceMarshaler.MarshalTraces(td) - if err != nil { - w.telemetrySettings.Logger.Debug("Error serializing to JSON", zap.Error(err)) - } else { - w.cs.writeBytes(b) + if w.limiter.Allow() { + b, err := traceMarshaler.MarshalTraces(td) + if err != nil { + w.telemetrySettings.Logger.Debug("Error serializing to JSON", zap.Error(err)) + } else { + w.cs.writeBytes(b) + } } + return td, nil } diff --git a/processor/remotetapprocessor/processor_test.go b/processor/remotetapprocessor/processor_test.go new file mode 100644 index 000000000000..c0222a99acf2 --- /dev/null +++ b/processor/remotetapprocessor/processor_test.go @@ -0,0 +1,163 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package remotetapprocessor + +import ( + "context" + "sync" + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" + "go.opentelemetry.io/collector/processor/processortest" + "golang.org/x/time/rate" +) + +func TestConsumeMetrics(t *testing.T) { + metric := pmetric.NewMetrics() + metric.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty().SetName("foo") + + cases := []struct { + name string + limit int + }{ + {name: "limit_0", limit: 0}, + {name: "limit_1", limit: 1}, + {name: "limit_10", limit: 10}, + {name: "limit_50", limit: 50}, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + conf := 
&Config{ + Limit: rate.Limit(c.limit), + } + + processor := newProcessor(processortest.NewNopCreateSettings(), conf) + + ch := make(chan []byte) + idx := processor.cs.add(ch) + receiveNum := 0 + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + for range ch { + receiveNum++ + } + }() + + for i := 0; i < c.limit*2; i++ { + // send metric to chan c.limit*2 per sec. + metric2, err := processor.ConsumeMetrics(context.Background(), metric) + assert.Nil(t, err) + assert.Equal(t, metric, metric2) + } + + processor.cs.closeAndRemove(idx) + wg.Wait() + assert.Equal(t, receiveNum, c.limit) + + }) + } +} + +func TestConsumeLogs(t *testing.T) { + log := plog.NewLogs() + log.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords().AppendEmpty().Body().SetStr("foo") + + cases := []struct { + name string + limit int + }{ + {name: "limit_0", limit: 0}, + {name: "limit_1", limit: 1}, + {name: "limit_10", limit: 10}, + {name: "limit_50", limit: 50}, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + conf := &Config{ + Limit: rate.Limit(c.limit), + } + + processor := newProcessor(processortest.NewNopCreateSettings(), conf) + + ch := make(chan []byte) + idx := processor.cs.add(ch) + receiveNum := 0 + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + for range ch { + receiveNum++ + } + }() + + // send log to chan c.limit*2 per sec. + for i := 0; i < c.limit*2; i++ { + log2, err := processor.ConsumeLogs(context.Background(), log) + assert.Nil(t, err) + assert.Equal(t, log, log2) + } + + processor.cs.closeAndRemove(idx) + wg.Wait() + t.Log(receiveNum) + assert.Equal(t, receiveNum, c.limit) + }) + } +} + +func TestConsumeTraces(t *testing.T) { + trace := ptrace.NewTraces() + trace.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty().SetName("foo") + + cases := []struct { + name string + limit int + }{ + {name: "limit_0", limit: 0}, + {name: "limit_1", limit: 1}, + {name: "limit_10", limit: 10}, + {name: "limit_50", limit: 50}, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + conf := &Config{ + Limit: rate.Limit(c.limit), + } + + processor := newProcessor(processortest.NewNopCreateSettings(), conf) + + ch := make(chan []byte) + idx := processor.cs.add(ch) + receiveNum := 0 + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + for range ch { + receiveNum++ + } + }() + + for i := 0; i < c.limit*2; i++ { + // send trace to chan c.limit*2 per sec. 
+ trace2, err := processor.ConsumeTraces(context.Background(), trace) + assert.Nil(t, err) + assert.Equal(t, trace, trace2) + } + + processor.cs.closeAndRemove(idx) + wg.Wait() + assert.Equal(t, receiveNum, c.limit) + }) + } +} diff --git a/processor/remotetapprocessor/server_test.go b/processor/remotetapprocessor/server_test.go index 66e7e1f75672..779a2353e56b 100644 --- a/processor/remotetapprocessor/server_test.go +++ b/processor/remotetapprocessor/server_test.go @@ -25,6 +25,7 @@ func TestSocketConnectionLogs(t *testing.T) { ServerConfig: confighttp.ServerConfig{ Endpoint: "localhost:12001", }, + Limit: 1, } logSink := &consumertest.LogsSink{} processor, err := NewFactory().CreateLogsProcessor(context.Background(), processortest.NewNopCreateSettings(), cfg, @@ -62,6 +63,7 @@ func TestSocketConnectionMetrics(t *testing.T) { ServerConfig: confighttp.ServerConfig{ Endpoint: "localhost:12002", }, + Limit: 1, } metricsSink := &consumertest.MetricsSink{} processor, err := NewFactory().CreateMetricsProcessor(context.Background(), processortest.NewNopCreateSettings(), cfg, @@ -97,6 +99,7 @@ func TestSocketConnectionTraces(t *testing.T) { ServerConfig: confighttp.ServerConfig{ Endpoint: "localhost:12003", }, + Limit: 1, } tracesSink := &consumertest.TracesSink{} processor, err := NewFactory().CreateTracesProcessor(context.Background(), processortest.NewNopCreateSettings(), cfg, From fec5543b69f0266130b40b440b763f89568816fb Mon Sep 17 00:00:00 2001 From: Stefan Kurek Date: Wed, 8 May 2024 13:38:19 -0400 Subject: [PATCH 08/55] [receiver/vcenter] Switches Over Metadata Configs Waiting for v0.100.0 Release (#32913) **Description:** A number of configurations were disabled by default and had warnings that they were going to be enabled in v0.101.0 (1 metric had a warning that it was going to be removed). Now that v0.100.0 has been release, I have removed all of these warnings, and made the modifications that the warnings "warned" about. I have also updated the tests to reflect this. **Link to tracking Issue:** #32803 #32805 #32821 #32531 #32557 **Testing:** Unit/integration tests updated and tested. Local environment tested. **Documentation:** New documentation generated based on the metadata. --- ...ceiver-vcenter_modify-default-configs.yaml | 31 + receiver/vcenterreceiver/documentation.md | 50 +- .../internal/metadata/generated_config.go | 18 +- .../metadata/generated_config_test.go | 2 - .../internal/metadata/generated_metrics.go | 80 - .../metadata/generated_metrics_test.go | 48 +- .../metadata/generated_resource_test.go | 12 +- .../internal/metadata/testdata/config.yaml | 4 - receiver/vcenterreceiver/metadata.yaml | 37 +- receiver/vcenterreceiver/scraper.go | 8 - receiver/vcenterreceiver/scraper_test.go | 11 - .../testdata/integration/expected.yaml | 45 + .../metrics/expected-all-enabled.yaml | 3798 +++-------------- .../testdata/metrics/expected.yaml | 108 + 14 files changed, 741 insertions(+), 3511 deletions(-) create mode 100644 .chloggen/receiver-vcenter_modify-default-configs.yaml diff --git a/.chloggen/receiver-vcenter_modify-default-configs.yaml b/.chloggen/receiver-vcenter_modify-default-configs.yaml new file mode 100644 index 000000000000..5232beebd3cb --- /dev/null +++ b/.chloggen/receiver-vcenter_modify-default-configs.yaml @@ -0,0 +1,31 @@ +# Use this changelog template to create an entry for release notes. 
+ +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: vcenterreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Changing various default configurations for vcenterreceiver and removing warnings about future release. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [32803, 32805, 32821, 32531, 32557] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: | + The resource attributes that will now be enabled by default are `vcenter.datacenter.name`, `vcenter.virtual_app.name`, + `vcenter.virtual_app.inventory_path`, `vcenter.vm_template.name`, and `vcenter.vm_template.id`. The metric + `vcenter.cluster.memory.used` will be removed. The metrics `vcenter.cluster.vm_template.count` and + `vcenter.vm.memory.utilization` will be enabled by default. + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/receiver/vcenterreceiver/documentation.md b/receiver/vcenterreceiver/documentation.md index ac7adf3f02b2..73e2142d16e7 100644 --- a/receiver/vcenterreceiver/documentation.md +++ b/receiver/vcenterreceiver/documentation.md @@ -60,14 +60,6 @@ The available memory of the cluster. | ---- | ----------- | ---------- | ----------------------- | --------- | | By | Sum | Int | Cumulative | false | -### vcenter.cluster.memory.used - -The memory that is currently used by the cluster. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| By | Sum | Int | Cumulative | false | - ### vcenter.cluster.vm.count The number of virtual machines in the cluster. @@ -82,6 +74,14 @@ The number of virtual machines in the cluster. | ---- | ----------- | ------ | | power_state | The current power state of the virtual machine. | Str: ``on``, ``off``, ``suspended`` | +### vcenter.cluster.vm_template.count + +The number of virtual machine templates in the cluster. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {virtual_machine_templates} | Sum | Int | Cumulative | false | + ### vcenter.datastore.disk.usage The amount of space in the datastore. @@ -400,6 +400,14 @@ The amount of memory that is used by the virtual machine. | ---- | ----------- | ---------- | ----------------------- | --------- | | MiBy | Sum | Int | Cumulative | false | +### vcenter.vm.memory.utilization + +The memory utilization of the VM. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| % | Gauge | Double | + ### vcenter.vm.network.packet.count The amount of packets that was received or transmitted over the instance's network. 
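Taken together, the default flips called out in the changelog above and encoded in the generated config below mean that `vcenter.datacenter.name`, the vApp and VM-template resource attributes, `vcenter.cluster.vm_template.count`, and `vcenter.vm.memory.utilization` are now emitted without any opt-in, while `vcenter.cluster.memory.used` is removed. A short sketch of what that looks like from inside the receiver module (the field paths come from the generated config in this patch; the test name is illustrative, and `internal/metadata` is only importable within the receiver itself):

```go
package vcenterreceiver

import (
	"testing"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/vcenterreceiver/internal/metadata"
	"github.com/stretchr/testify/assert"
)

func TestDefaultsAfterSwitchSketch(t *testing.T) {
	cfg := metadata.DefaultMetricsBuilderConfig()

	// Previously required explicit opt-in (see the lines removed from scraper_test.go);
	// enabled by default as of this change.
	assert.True(t, cfg.ResourceAttributes.VcenterDatacenterName.Enabled)
	assert.True(t, cfg.Metrics.VcenterClusterVMTemplateCount.Enabled)
	assert.True(t, cfg.Metrics.VcenterVMMemoryUtilization.Enabled)

	// Opting back out is a per-field override, e.g. to keep VM templates out of the data.
	cfg.ResourceAttributes.VcenterVMTemplateName.Enabled = false
	cfg.ResourceAttributes.VcenterVMTemplateID.Enabled = false
	assert.False(t, cfg.ResourceAttributes.VcenterVMTemplateName.Enabled)
}
```

For collector users the same switches are exposed through the receiver's configuration, per the enable/disable snippets in the generated `documentation.md`.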
@@ -458,14 +466,6 @@ metrics: enabled: true ``` -### vcenter.cluster.vm_template.count - -The number of virtual machine templates in the cluster. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {virtual_machine_templates} | Sum | Int | Cumulative | false | - ### vcenter.host.network.packet.error.rate The rate of packet errors transmitted or received on the host network. @@ -500,14 +500,6 @@ As measured over the most recent 20s interval. | direction | The direction of network throughput. | Str: ``transmitted``, ``received`` | | object | The object on the virtual machine or host that is being reported on. | Any Str | -### vcenter.vm.memory.utilization - -The memory utilization of the VM. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| % | Gauge | Double | - ### vcenter.vm.network.packet.drop.rate The rate of transmitted or received packets dropped by each vNIC (virtual network interface controller) on the virtual machine. @@ -547,14 +539,14 @@ As measured over the most recent 20s interval. | Name | Description | Values | Enabled | | ---- | ----------- | ------ | ------- | | vcenter.cluster.name | The name of the vCenter cluster. | Any Str | true | -| vcenter.datacenter.name | The name of the vCenter datacenter. | Any Str | false | +| vcenter.datacenter.name | The name of the vCenter datacenter. | Any Str | true | | vcenter.datastore.name | The name of the vCenter datastore. | Any Str | true | | vcenter.host.name | The hostname of the vCenter ESXi host. | Any Str | true | | vcenter.resource_pool.inventory_path | The inventory path of the resource pool. | Any Str | true | | vcenter.resource_pool.name | The name of the resource pool. | Any Str | true | -| vcenter.virtual_app.inventory_path | The inventory path of the vApp. | Any Str | false | -| vcenter.virtual_app.name | The name of the vApp. | Any Str | false | +| vcenter.virtual_app.inventory_path | The inventory path of the vApp. | Any Str | true | +| vcenter.virtual_app.name | The name of the vApp. | Any Str | true | | vcenter.vm.id | The instance UUID of the virtual machine. | Any Str | true | | vcenter.vm.name | The name of the virtual machine. | Any Str | true | -| vcenter.vm_template.id | The instance UUID of the virtual machine template. | Any Str | false | -| vcenter.vm_template.name | The name of the virtual machine template. | Any Str | false | +| vcenter.vm_template.id | The instance UUID of the virtual machine template. | Any Str | true | +| vcenter.vm_template.name | The name of the virtual machine template. 
| Any Str | true | diff --git a/receiver/vcenterreceiver/internal/metadata/generated_config.go b/receiver/vcenterreceiver/internal/metadata/generated_config.go index 954b1ffa1748..bb502a7983b6 100644 --- a/receiver/vcenterreceiver/internal/metadata/generated_config.go +++ b/receiver/vcenterreceiver/internal/metadata/generated_config.go @@ -33,7 +33,6 @@ type MetricsConfig struct { VcenterClusterHostCount MetricConfig `mapstructure:"vcenter.cluster.host.count"` VcenterClusterMemoryEffective MetricConfig `mapstructure:"vcenter.cluster.memory.effective"` VcenterClusterMemoryLimit MetricConfig `mapstructure:"vcenter.cluster.memory.limit"` - VcenterClusterMemoryUsed MetricConfig `mapstructure:"vcenter.cluster.memory.used"` VcenterClusterVMCount MetricConfig `mapstructure:"vcenter.cluster.vm.count"` VcenterClusterVMTemplateCount MetricConfig `mapstructure:"vcenter.cluster.vm_template.count"` VcenterDatastoreDiskUsage MetricConfig `mapstructure:"vcenter.datastore.disk.usage"` @@ -91,14 +90,11 @@ func DefaultMetricsConfig() MetricsConfig { VcenterClusterMemoryLimit: MetricConfig{ Enabled: true, }, - VcenterClusterMemoryUsed: MetricConfig{ - Enabled: true, - }, VcenterClusterVMCount: MetricConfig{ Enabled: true, }, VcenterClusterVMTemplateCount: MetricConfig{ - Enabled: false, + Enabled: true, }, VcenterDatastoreDiskUsage: MetricConfig{ Enabled: true, @@ -191,7 +187,7 @@ func DefaultMetricsConfig() MetricsConfig { Enabled: true, }, VcenterVMMemoryUtilization: MetricConfig{ - Enabled: false, + Enabled: true, }, VcenterVMNetworkPacketCount: MetricConfig{ Enabled: true, @@ -259,7 +255,7 @@ func DefaultResourceAttributesConfig() ResourceAttributesConfig { Enabled: true, }, VcenterDatacenterName: ResourceAttributeConfig{ - Enabled: false, + Enabled: true, }, VcenterDatastoreName: ResourceAttributeConfig{ Enabled: true, @@ -274,10 +270,10 @@ func DefaultResourceAttributesConfig() ResourceAttributesConfig { Enabled: true, }, VcenterVirtualAppInventoryPath: ResourceAttributeConfig{ - Enabled: false, + Enabled: true, }, VcenterVirtualAppName: ResourceAttributeConfig{ - Enabled: false, + Enabled: true, }, VcenterVMID: ResourceAttributeConfig{ Enabled: true, @@ -286,10 +282,10 @@ func DefaultResourceAttributesConfig() ResourceAttributesConfig { Enabled: true, }, VcenterVMTemplateID: ResourceAttributeConfig{ - Enabled: false, + Enabled: true, }, VcenterVMTemplateName: ResourceAttributeConfig{ - Enabled: false, + Enabled: true, }, } } diff --git a/receiver/vcenterreceiver/internal/metadata/generated_config_test.go b/receiver/vcenterreceiver/internal/metadata/generated_config_test.go index a3f28c5d3dd7..c31304800cbd 100644 --- a/receiver/vcenterreceiver/internal/metadata/generated_config_test.go +++ b/receiver/vcenterreceiver/internal/metadata/generated_config_test.go @@ -31,7 +31,6 @@ func TestMetricsBuilderConfig(t *testing.T) { VcenterClusterHostCount: MetricConfig{Enabled: true}, VcenterClusterMemoryEffective: MetricConfig{Enabled: true}, VcenterClusterMemoryLimit: MetricConfig{Enabled: true}, - VcenterClusterMemoryUsed: MetricConfig{Enabled: true}, VcenterClusterVMCount: MetricConfig{Enabled: true}, VcenterClusterVMTemplateCount: MetricConfig{Enabled: true}, VcenterDatastoreDiskUsage: MetricConfig{Enabled: true}, @@ -96,7 +95,6 @@ func TestMetricsBuilderConfig(t *testing.T) { VcenterClusterHostCount: MetricConfig{Enabled: false}, VcenterClusterMemoryEffective: MetricConfig{Enabled: false}, VcenterClusterMemoryLimit: MetricConfig{Enabled: false}, - VcenterClusterMemoryUsed: MetricConfig{Enabled: false}, 
VcenterClusterVMCount: MetricConfig{Enabled: false}, VcenterClusterVMTemplateCount: MetricConfig{Enabled: false}, VcenterDatastoreDiskUsage: MetricConfig{Enabled: false}, diff --git a/receiver/vcenterreceiver/internal/metadata/generated_metrics.go b/receiver/vcenterreceiver/internal/metadata/generated_metrics.go index efe7064ca54c..1e432048dd48 100644 --- a/receiver/vcenterreceiver/internal/metadata/generated_metrics.go +++ b/receiver/vcenterreceiver/internal/metadata/generated_metrics.go @@ -403,57 +403,6 @@ func newMetricVcenterClusterMemoryLimit(cfg MetricConfig) metricVcenterClusterMe return m } -type metricVcenterClusterMemoryUsed struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills vcenter.cluster.memory.used metric with initial data. -func (m *metricVcenterClusterMemoryUsed) init() { - m.data.SetName("vcenter.cluster.memory.used") - m.data.SetDescription("The memory that is currently used by the cluster.") - m.data.SetUnit("By") - m.data.SetEmptySum() - m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) -} - -func (m *metricVcenterClusterMemoryUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricVcenterClusterMemoryUsed) updateCapacity() { - if m.data.Sum().DataPoints().Len() > m.capacity { - m.capacity = m.data.Sum().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricVcenterClusterMemoryUsed) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricVcenterClusterMemoryUsed(cfg MetricConfig) metricVcenterClusterMemoryUsed { - m := metricVcenterClusterMemoryUsed{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - type metricVcenterClusterVMCount struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. 
@@ -2431,7 +2380,6 @@ type MetricsBuilder struct { metricVcenterClusterHostCount metricVcenterClusterHostCount metricVcenterClusterMemoryEffective metricVcenterClusterMemoryEffective metricVcenterClusterMemoryLimit metricVcenterClusterMemoryLimit - metricVcenterClusterMemoryUsed metricVcenterClusterMemoryUsed metricVcenterClusterVMCount metricVcenterClusterVMCount metricVcenterClusterVMTemplateCount metricVcenterClusterVMTemplateCount metricVcenterDatastoreDiskUsage metricVcenterDatastoreDiskUsage @@ -2483,12 +2431,6 @@ func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { } func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSettings, options ...metricBuilderOption) *MetricsBuilder { - if mbc.Metrics.VcenterClusterMemoryUsed.enabledSetByUser { - settings.Logger.Warn("[WARNING] `vcenter.cluster.memory.used` should not be configured: this metric is unimplemented & will be removed starting in release v0.101.0") - } - if !mbc.Metrics.VcenterClusterVMTemplateCount.enabledSetByUser { - settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `vcenter.cluster.vm_template.count`: this metric will be enabled by default starting in release v0.101.0") - } if mbc.Metrics.VcenterHostNetworkPacketCount.enabledSetByUser { settings.Logger.Warn("[WARNING] `vcenter.host.network.packet.count` should not be configured: this metric is replaced by [vcenter.host.network.packet.rate] & will be removed starting in release v0.102.0") } @@ -2510,21 +2452,6 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSetting if !mbc.Metrics.VcenterVMNetworkPacketRate.enabledSetByUser { settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `vcenter.vm.network.packet.rate`: this metric will be enabled by default starting in release v0.102.0") } - if !mbc.ResourceAttributes.VcenterDatacenterName.enabledSetByUser { - settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `vcenter.datacenter.name`: this attribute will be enabled by default starting in release v0.101.0") - } - if !mbc.ResourceAttributes.VcenterVirtualAppInventoryPath.enabledSetByUser { - settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `vcenter.virtual_app.inventory_path`: this attribute will be enabled by default starting in release v0.101.0") - } - if !mbc.ResourceAttributes.VcenterVirtualAppName.enabledSetByUser { - settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `vcenter.virtual_app.name`: this attribute will be enabled by default starting in release v0.101.0") - } - if !mbc.ResourceAttributes.VcenterVMTemplateID.enabledSetByUser { - settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `vcenter.vm_template.id`: this attribute will be enabled by default starting in release v0.101.0") - } - if !mbc.ResourceAttributes.VcenterVMTemplateName.enabledSetByUser { - settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `vcenter.vm_template.name`: this attribute will be enabled by default starting in release v0.101.0") - } mb := &MetricsBuilder{ config: mbc, startTime: pcommon.NewTimestampFromTime(time.Now()), @@ -2535,7 +2462,6 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSetting metricVcenterClusterHostCount: newMetricVcenterClusterHostCount(mbc.Metrics.VcenterClusterHostCount), metricVcenterClusterMemoryEffective: newMetricVcenterClusterMemoryEffective(mbc.Metrics.VcenterClusterMemoryEffective), metricVcenterClusterMemoryLimit: 
newMetricVcenterClusterMemoryLimit(mbc.Metrics.VcenterClusterMemoryLimit), - metricVcenterClusterMemoryUsed: newMetricVcenterClusterMemoryUsed(mbc.Metrics.VcenterClusterMemoryUsed), metricVcenterClusterVMCount: newMetricVcenterClusterVMCount(mbc.Metrics.VcenterClusterVMCount), metricVcenterClusterVMTemplateCount: newMetricVcenterClusterVMTemplateCount(mbc.Metrics.VcenterClusterVMTemplateCount), metricVcenterDatastoreDiskUsage: newMetricVcenterDatastoreDiskUsage(mbc.Metrics.VcenterDatastoreDiskUsage), @@ -2715,7 +2641,6 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { mb.metricVcenterClusterHostCount.emit(ils.Metrics()) mb.metricVcenterClusterMemoryEffective.emit(ils.Metrics()) mb.metricVcenterClusterMemoryLimit.emit(ils.Metrics()) - mb.metricVcenterClusterMemoryUsed.emit(ils.Metrics()) mb.metricVcenterClusterVMCount.emit(ils.Metrics()) mb.metricVcenterClusterVMTemplateCount.emit(ils.Metrics()) mb.metricVcenterDatastoreDiskUsage.emit(ils.Metrics()) @@ -2810,11 +2735,6 @@ func (mb *MetricsBuilder) RecordVcenterClusterMemoryLimitDataPoint(ts pcommon.Ti mb.metricVcenterClusterMemoryLimit.recordDataPoint(mb.startTime, ts, val) } -// RecordVcenterClusterMemoryUsedDataPoint adds a data point to vcenter.cluster.memory.used metric. -func (mb *MetricsBuilder) RecordVcenterClusterMemoryUsedDataPoint(ts pcommon.Timestamp, val int64) { - mb.metricVcenterClusterMemoryUsed.recordDataPoint(mb.startTime, ts, val) -} - // RecordVcenterClusterVMCountDataPoint adds a data point to vcenter.cluster.vm.count metric. func (mb *MetricsBuilder) RecordVcenterClusterVMCountDataPoint(ts pcommon.Timestamp, val int64, vmCountPowerStateAttributeValue AttributeVMCountPowerState) { mb.metricVcenterClusterVMCount.recordDataPoint(mb.startTime, ts, val, vmCountPowerStateAttributeValue.String()) diff --git a/receiver/vcenterreceiver/internal/metadata/generated_metrics_test.go b/receiver/vcenterreceiver/internal/metadata/generated_metrics_test.go index 0285523faed5..280ed3f203c7 100644 --- a/receiver/vcenterreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/vcenterreceiver/internal/metadata/generated_metrics_test.go @@ -62,14 +62,6 @@ func TestMetricsBuilder(t *testing.T) { mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, test.name), settings, WithStartTime(start)) expectedWarnings := 0 - if test.metricsSet == testDataSetAll || test.metricsSet == testDataSetNone { - assert.Equal(t, "[WARNING] `vcenter.cluster.memory.used` should not be configured: this metric is unimplemented & will be removed starting in release v0.101.0", observedLogs.All()[expectedWarnings].Message) - expectedWarnings++ - } - if test.metricsSet == testDataSetDefault { - assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `vcenter.cluster.vm_template.count`: this metric will be enabled by default starting in release v0.101.0", observedLogs.All()[expectedWarnings].Message) - expectedWarnings++ - } if test.metricsSet == testDataSetAll || test.metricsSet == testDataSetNone { assert.Equal(t, "[WARNING] `vcenter.host.network.packet.count` should not be configured: this metric is replaced by [vcenter.host.network.packet.rate] & will be removed starting in release v0.102.0", observedLogs.All()[expectedWarnings].Message) expectedWarnings++ @@ -98,26 +90,6 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `vcenter.vm.network.packet.rate`: this metric will be enabled by default starting in release v0.102.0", 
observedLogs.All()[expectedWarnings].Message) expectedWarnings++ } - if test.resAttrsSet == testDataSetDefault { - assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `vcenter.datacenter.name`: this attribute will be enabled by default starting in release v0.101.0", observedLogs.All()[expectedWarnings].Message) - expectedWarnings++ - } - if test.resAttrsSet == testDataSetDefault { - assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `vcenter.virtual_app.inventory_path`: this attribute will be enabled by default starting in release v0.101.0", observedLogs.All()[expectedWarnings].Message) - expectedWarnings++ - } - if test.resAttrsSet == testDataSetDefault { - assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `vcenter.virtual_app.name`: this attribute will be enabled by default starting in release v0.101.0", observedLogs.All()[expectedWarnings].Message) - expectedWarnings++ - } - if test.resAttrsSet == testDataSetDefault { - assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `vcenter.vm_template.id`: this attribute will be enabled by default starting in release v0.101.0", observedLogs.All()[expectedWarnings].Message) - expectedWarnings++ - } - if test.resAttrsSet == testDataSetDefault { - assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `vcenter.vm_template.name`: this attribute will be enabled by default starting in release v0.101.0", observedLogs.All()[expectedWarnings].Message) - expectedWarnings++ - } assert.Equal(t, expectedWarnings, observedLogs.Len()) @@ -144,14 +116,11 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordVcenterClusterMemoryLimitDataPoint(ts, 1) - defaultMetricsCount++ - allMetricsCount++ - mb.RecordVcenterClusterMemoryUsedDataPoint(ts, 1) - defaultMetricsCount++ allMetricsCount++ mb.RecordVcenterClusterVMCountDataPoint(ts, 1, AttributeVMCountPowerStateOn) + defaultMetricsCount++ allMetricsCount++ mb.RecordVcenterClusterVMTemplateCountDataPoint(ts, 1) @@ -273,6 +242,7 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordVcenterVMMemoryUsageDataPoint(ts, 1) + defaultMetricsCount++ allMetricsCount++ mb.RecordVcenterVMMemoryUtilizationDataPoint(ts, 1) @@ -402,20 +372,6 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) - case "vcenter.cluster.memory.used": - assert.False(t, validatedMetrics["vcenter.cluster.memory.used"], "Found a duplicate in the metrics slice: vcenter.cluster.memory.used") - validatedMetrics["vcenter.cluster.memory.used"] = true - assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) - assert.Equal(t, "The memory that is currently used by the cluster.", ms.At(i).Description()) - assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) case "vcenter.cluster.vm.count": assert.False(t, validatedMetrics["vcenter.cluster.vm.count"], "Found a duplicate in the metrics slice: vcenter.cluster.vm.count") validatedMetrics["vcenter.cluster.vm.count"] = true diff --git 
a/receiver/vcenterreceiver/internal/metadata/generated_resource_test.go b/receiver/vcenterreceiver/internal/metadata/generated_resource_test.go index 39d8e7b32514..cc4336669b4b 100644 --- a/receiver/vcenterreceiver/internal/metadata/generated_resource_test.go +++ b/receiver/vcenterreceiver/internal/metadata/generated_resource_test.go @@ -31,7 +31,7 @@ func TestResourceBuilder(t *testing.T) { switch test { case "default": - assert.Equal(t, 7, res.Attributes().Len()) + assert.Equal(t, 12, res.Attributes().Len()) case "all_set": assert.Equal(t, 12, res.Attributes().Len()) case "none_set": @@ -47,7 +47,7 @@ func TestResourceBuilder(t *testing.T) { assert.EqualValues(t, "vcenter.cluster.name-val", val.Str()) } val, ok = res.Attributes().Get("vcenter.datacenter.name") - assert.Equal(t, test == "all_set", ok) + assert.True(t, ok) if ok { assert.EqualValues(t, "vcenter.datacenter.name-val", val.Str()) } @@ -72,12 +72,12 @@ func TestResourceBuilder(t *testing.T) { assert.EqualValues(t, "vcenter.resource_pool.name-val", val.Str()) } val, ok = res.Attributes().Get("vcenter.virtual_app.inventory_path") - assert.Equal(t, test == "all_set", ok) + assert.True(t, ok) if ok { assert.EqualValues(t, "vcenter.virtual_app.inventory_path-val", val.Str()) } val, ok = res.Attributes().Get("vcenter.virtual_app.name") - assert.Equal(t, test == "all_set", ok) + assert.True(t, ok) if ok { assert.EqualValues(t, "vcenter.virtual_app.name-val", val.Str()) } @@ -92,12 +92,12 @@ func TestResourceBuilder(t *testing.T) { assert.EqualValues(t, "vcenter.vm.name-val", val.Str()) } val, ok = res.Attributes().Get("vcenter.vm_template.id") - assert.Equal(t, test == "all_set", ok) + assert.True(t, ok) if ok { assert.EqualValues(t, "vcenter.vm_template.id-val", val.Str()) } val, ok = res.Attributes().Get("vcenter.vm_template.name") - assert.Equal(t, test == "all_set", ok) + assert.True(t, ok) if ok { assert.EqualValues(t, "vcenter.vm_template.name-val", val.Str()) } diff --git a/receiver/vcenterreceiver/internal/metadata/testdata/config.yaml b/receiver/vcenterreceiver/internal/metadata/testdata/config.yaml index 0e9970eadf8e..e2db587e5d2e 100644 --- a/receiver/vcenterreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/vcenterreceiver/internal/metadata/testdata/config.yaml @@ -11,8 +11,6 @@ all_set: enabled: true vcenter.cluster.memory.limit: enabled: true - vcenter.cluster.memory.used: - enabled: true vcenter.cluster.vm.count: enabled: true vcenter.cluster.vm_template.count: @@ -126,8 +124,6 @@ none_set: enabled: false vcenter.cluster.memory.limit: enabled: false - vcenter.cluster.memory.used: - enabled: false vcenter.cluster.vm.count: enabled: false vcenter.cluster.vm_template.count: diff --git a/receiver/vcenterreceiver/metadata.yaml b/receiver/vcenterreceiver/metadata.yaml index 4c82e086b6bb..e50357b44177 100644 --- a/receiver/vcenterreceiver/metadata.yaml +++ b/receiver/vcenterreceiver/metadata.yaml @@ -13,10 +13,8 @@ status: resource_attributes: vcenter.datacenter.name: description: The name of the vCenter datacenter. - enabled: false + enabled: true type: string - warnings: - if_enabled_not_set: "this attribute will be enabled by default starting in release v0.101.0" vcenter.cluster.name: description: The name of the vCenter cluster. enabled: true @@ -35,16 +33,12 @@ resource_attributes: type: string vcenter.virtual_app.name: description: The name of the vApp. 
- enabled: false + enabled: true type: string - warnings: - if_enabled_not_set: "this attribute will be enabled by default starting in release v0.101.0" vcenter.virtual_app.inventory_path: description: The inventory path of the vApp. - enabled: false + enabled: true type: string - warnings: - if_enabled_not_set: "this attribute will be enabled by default starting in release v0.101.0" vcenter.datastore.name: description: The name of the vCenter datastore. enabled: true @@ -59,16 +53,12 @@ resource_attributes: type: string vcenter.vm_template.name: description: The name of the virtual machine template. - enabled: false + enabled: true type: string - warnings: - if_enabled_not_set: "this attribute will be enabled by default starting in release v0.101.0" vcenter.vm_template.id: description: The instance UUID of the virtual machine template. - enabled: false + enabled: true type: string - warnings: - if_enabled_not_set: "this attribute will be enabled by default starting in release v0.101.0" attributes: disk_state: @@ -152,17 +142,6 @@ metrics: aggregation_temporality: cumulative attributes: [] extended_documentation: This value excludes memory from hosts that are either in maintenance mode or are unresponsive. It also excludes memory used by the VMware Service Console. - vcenter.cluster.memory.used: - enabled: true - description: The memory that is currently used by the cluster. - unit: By - sum: - monotonic: false - value_type: int - aggregation_temporality: cumulative - attributes: [] - warnings: - if_configured: this metric is unimplemented & will be removed starting in release v0.101.0 vcenter.cluster.vm.count: enabled: true description: The number of virtual machines in the cluster. @@ -173,7 +152,7 @@ metrics: aggregation_temporality: cumulative attributes: [vm_count_power_state] vcenter.cluster.vm_template.count: - enabled: false + enabled: true description: The number of virtual machine templates in the cluster. unit: "{virtual_machine_templates}" sum: @@ -181,8 +160,6 @@ metrics: value_type: int aggregation_temporality: cumulative attributes: [] - warnings: - if_enabled_not_set: "this metric will be enabled by default starting in release v0.101.0" vcenter.cluster.host.count: enabled: true description: The number of hosts in the cluster. @@ -507,7 +484,7 @@ metrics: aggregation_temporality: cumulative attributes: [] vcenter.vm.memory.utilization: - enabled: false + enabled: true description: The memory utilization of the VM. 
unit: "%" gauge: diff --git a/receiver/vcenterreceiver/scraper.go b/receiver/vcenterreceiver/scraper.go index 80895bde15e4..fc3f260d1cae 100644 --- a/receiver/vcenterreceiver/scraper.go +++ b/receiver/vcenterreceiver/scraper.go @@ -409,14 +409,6 @@ func (v *vcenterMetricScraper) collectVMs( } } - // TODO: Remove after v0.100.0 has been released - // Ignore template resources/metrics for now if not explicitly enabled - if vm.Config.Template && - !v.client.cfg.ResourceAttributes.VcenterVMTemplateID.Enabled && - !v.client.cfg.ResourceAttributes.VcenterVMTemplateName.Enabled { - continue - } - // vApp may not exist for a VM vApp := v.vmToVirtualApp[vm.Reference().Value] diff --git a/receiver/vcenterreceiver/scraper_test.go b/receiver/vcenterreceiver/scraper_test.go index fefeb86ce2cc..ef1fdd26c887 100644 --- a/receiver/vcenterreceiver/scraper_test.go +++ b/receiver/vcenterreceiver/scraper_test.go @@ -39,17 +39,6 @@ func TestScrapeConfigsEnabled(t *testing.T) { defer mockServer.Close() optConfigs := metadata.DefaultMetricsBuilderConfig() - optConfigs.ResourceAttributes.VcenterDatacenterName.Enabled = true - optConfigs.ResourceAttributes.VcenterVirtualAppName.Enabled = true - optConfigs.ResourceAttributes.VcenterVirtualAppInventoryPath.Enabled = true - optConfigs.ResourceAttributes.VcenterVMTemplateID.Enabled = true - optConfigs.ResourceAttributes.VcenterVMTemplateName.Enabled = true - optConfigs.Metrics.VcenterVMMemoryUtilization.Enabled = true - optConfigs.Metrics.VcenterClusterVMTemplateCount.Enabled = true - optConfigs.Metrics.VcenterHostNetworkPacketErrorRate.Enabled = true - optConfigs.Metrics.VcenterHostNetworkPacketRate.Enabled = true - optConfigs.Metrics.VcenterVMNetworkPacketRate.Enabled = true - optConfigs.Metrics.VcenterVMNetworkPacketDropRate.Enabled = true cfg := &Config{ MetricsBuilderConfig: optConfigs, diff --git a/receiver/vcenterreceiver/testdata/integration/expected.yaml b/receiver/vcenterreceiver/testdata/integration/expected.yaml index e0388ed1d83b..7542d529967f 100644 --- a/receiver/vcenterreceiver/testdata/integration/expected.yaml +++ b/receiver/vcenterreceiver/testdata/integration/expected.yaml @@ -1,6 +1,9 @@ resourceMetrics: - resource: attributes: + - key: vcenter.datacenter.name + value: + stringValue: DC0 - key: vcenter.host.name value: stringValue: DC0_H0 @@ -35,6 +38,9 @@ resourceMetrics: version: latest - resource: attributes: + - key: vcenter.datacenter.name + value: + stringValue: DC0 - key: vcenter.cluster.name value: stringValue: DC0_C0 @@ -69,6 +75,9 @@ resourceMetrics: version: latest - resource: attributes: + - key: vcenter.datacenter.name + value: + stringValue: DC0 - key: vcenter.host.name value: stringValue: DC0_H0 @@ -113,6 +122,9 @@ resourceMetrics: version: latest - resource: attributes: + - key: vcenter.datacenter.name + value: + stringValue: DC0 - key: vcenter.datastore.name value: stringValue: LocalDS_0 @@ -151,6 +163,9 @@ resourceMetrics: version: latest - resource: attributes: + - key: vcenter.datacenter.name + value: + stringValue: DC0 - key: vcenter.vm.name value: stringValue: DC0_H0_VM0 @@ -237,6 +252,9 @@ resourceMetrics: version: latest - resource: attributes: + - key: vcenter.datacenter.name + value: + stringValue: DC0 - key: vcenter.vm.name value: stringValue: DC0_H0_VM1 @@ -323,6 +341,9 @@ resourceMetrics: version: latest - resource: attributes: + - key: vcenter.datacenter.name + value: + stringValue: DC0 - key: vcenter.host.name value: stringValue: DC0_C0_H0 @@ -370,6 +391,9 @@ resourceMetrics: version: latest - resource: attributes: + 
- key: vcenter.datacenter.name + value: + stringValue: DC0 - key: vcenter.host.name value: stringValue: DC0_C0_H1 @@ -417,6 +441,9 @@ resourceMetrics: version: latest - resource: attributes: + - key: vcenter.datacenter.name + value: + stringValue: DC0 - key: vcenter.host.name value: stringValue: DC0_C0_H2 @@ -464,6 +491,9 @@ resourceMetrics: version: latest - resource: attributes: + - key: vcenter.datacenter.name + value: + stringValue: DC0 - key: vcenter.vm.name value: stringValue: DC0_C0_RP0_VM0 @@ -553,6 +583,9 @@ resourceMetrics: version: latest - resource: attributes: + - key: vcenter.datacenter.name + value: + stringValue: DC0 - key: vcenter.vm.name value: stringValue: DC0_C0_RP0_VM1 @@ -642,6 +675,9 @@ resourceMetrics: version: latest - resource: attributes: + - key: vcenter.datacenter.name + value: + stringValue: DC0 - key: vcenter.cluster.name value: stringValue: DC0_C0 @@ -730,6 +766,15 @@ resourceMetrics: startTimeUnixNano: "1707407684042820000" timeUnixNano: "1707407733803628000" unit: "{virtual_machines}" + - description: The number of virtual machine templates in the cluster. + name: vcenter.cluster.vm_template.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1707407684042820000" + timeUnixNano: "1707407733803628000" + unit: "{virtual_machine_templates}" scope: name: otelcol/vcenterreceiver version: latest diff --git a/receiver/vcenterreceiver/testdata/metrics/expected-all-enabled.yaml b/receiver/vcenterreceiver/testdata/metrics/expected-all-enabled.yaml index 13610089822e..f52ae97074e3 100644 --- a/receiver/vcenterreceiver/testdata/metrics/expected-all-enabled.yaml +++ b/receiver/vcenterreceiver/testdata/metrics/expected-all-enabled.yaml @@ -1399,11 +1399,12 @@ resourceMetrics: startTimeUnixNano: "6000000" timeUnixNano: "5000000" unit: '{packets/sec}' - - description: The rate of packet errors transmitted or received on the host network. - name: vcenter.host.network.packet.error.rate - gauge: + - description: The summation of packet errors on the host network. 
+ name: vcenter.host.network.packet.errors + sum: + aggregationTemporality: 2 dataPoints: - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1413,7 +1414,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1423,7 +1424,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1433,7 +1434,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1443,7 +1444,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1453,7 +1454,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1463,7 +1464,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1473,7 +1474,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1483,7 +1484,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1493,7 +1494,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1503,7 +1504,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1513,7 +1514,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1523,7 +1524,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1533,7 +1534,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1543,7 +1544,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1553,7 +1554,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1563,7 +1564,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1573,7 +1574,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1583,7 +1584,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1593,7 +1594,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "0" + - asInt: "0" attributes: 
- key: direction value: @@ -1603,7 +1604,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1613,7 +1614,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1623,7 +1624,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1633,7 +1634,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1643,7 +1644,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1653,7 +1654,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1663,7 +1664,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1673,7 +1674,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1683,7 +1684,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1693,7 +1694,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1703,7 +1704,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1713,7 +1714,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1723,7 +1724,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1733,7 +1734,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1743,7 +1744,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1753,7 +1754,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1763,7 +1764,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1773,7 +1774,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1783,7 +1784,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1793,7 +1794,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "4000000" 
- - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1803,7 +1804,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1813,7 +1814,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1823,7 +1824,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1833,7 +1834,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1843,7 +1844,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1853,7 +1854,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1863,7 +1864,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1873,7 +1874,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1883,7 +1884,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1893,7 +1894,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -1903,13 +1904,13 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - unit: '{errors/sec}' - - description: The summation of packet errors on the host network. - name: vcenter.host.network.packet.errors + unit: '{errors}' + - description: The amount of data that was transmitted or received over the network by the host. 
+ name: vcenter.host.network.throughput sum: aggregationTemporality: 2 dataPoints: - - asInt: "0" + - asInt: "928" attributes: - key: direction value: @@ -1919,7 +1920,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asInt: "1120" attributes: - key: direction value: @@ -1929,7 +1930,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asInt: "1646" attributes: - key: direction value: @@ -1939,7 +1940,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asInt: "1291" attributes: - key: direction value: @@ -1949,7 +1950,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asInt: "1058" attributes: - key: direction value: @@ -1959,7 +1960,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asInt: "570" attributes: - key: direction value: @@ -1969,7 +1970,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asInt: "768" attributes: - key: direction value: @@ -1979,7 +1980,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asInt: "1269" attributes: - key: direction value: @@ -1989,7 +1990,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asInt: "927" attributes: - key: direction value: @@ -1999,7 +2000,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asInt: "681" attributes: - key: direction value: @@ -2109,7 +2110,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asInt: "357" attributes: - key: direction value: @@ -2119,7 +2120,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asInt: "351" attributes: - key: direction value: @@ -2129,7 +2130,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asInt: "376" attributes: - key: direction value: @@ -2139,7 +2140,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asInt: "363" attributes: - key: direction value: @@ -2149,7 +2150,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asInt: "376" attributes: - key: direction value: @@ -2159,7 +2160,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asInt: "3475" attributes: - key: direction value: @@ -2169,7 +2170,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asInt: "2959" attributes: - key: direction value: @@ -2179,7 +2180,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asInt: "4924" attributes: - key: direction value: @@ -2189,7 +2190,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asInt: "4364" attributes: - key: direction value: @@ -2199,7 +2200,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asInt: "3058" attributes: - key: direction value: @@ 
-2209,7 +2210,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asInt: "3064" attributes: - key: direction value: @@ -2219,7 +2220,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asInt: "2537" attributes: - key: direction value: @@ -2229,7 +2230,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asInt: "4373" attributes: - key: direction value: @@ -2239,7 +2240,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asInt: "3746" attributes: - key: direction value: @@ -2249,7 +2250,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asInt: "2569" attributes: - key: direction value: @@ -2359,7 +2360,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asInt: "411" attributes: - key: direction value: @@ -2369,7 +2370,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asInt: "422" attributes: - key: direction value: @@ -2379,7 +2380,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asInt: "551" attributes: - key: direction value: @@ -2389,7 +2390,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asInt: "617" attributes: - key: direction value: @@ -2399,7 +2400,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asInt: "488" attributes: - key: direction value: @@ -2409,2964 +2410,942 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - unit: '{errors}' - - description: The rate of packets transmitted or received across each physical NIC (network interface controller) instance on the host. - name: vcenter.host.network.packet.rate - gauge: + unit: '{KiBy/s}' + - description: The sum of the data transmitted and received for all the NIC instances of the host. 
+ name: vcenter.host.network.usage + sum: + aggregationTemporality: 2 dataPoints: - - asDouble: "2782.35" + - asInt: "4404" attributes: - - key: direction - value: - stringValue: received - key: object value: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "2868.8" + - asInt: "4079" attributes: - - key: direction - value: - stringValue: received - key: object value: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "3207.8" + - asInt: "6570" attributes: - - key: direction - value: - stringValue: received - key: object value: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "2940.7" + - asInt: "5655" attributes: - - key: direction - value: - stringValue: received - key: object value: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "2869.5" + - asInt: "4117" attributes: - - key: direction - value: - stringValue: received - key: object value: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asDouble: "665.8" + - asInt: "3634" attributes: - - key: direction - value: - stringValue: received - key: object value: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "723.65" + - asInt: "3305" attributes: - - key: direction - value: - stringValue: received - key: object value: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "983.1" + - asInt: "5642" attributes: - - key: direction - value: - stringValue: received - key: object value: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "773.9" + - asInt: "4674" attributes: - - key: direction - value: - stringValue: received - key: object value: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "722.9" + - asInt: "3251" attributes: - - key: direction - value: - stringValue: received - key: object value: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asDouble: "5.8" + - asInt: "0" attributes: - - key: direction - value: - stringValue: received - key: object value: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "5.7" + - asInt: "0" attributes: - - key: direction - value: - stringValue: received - key: object value: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "5.65" + - asInt: "0" attributes: - - key: direction - value: - stringValue: received - key: object value: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "5.6" + - asInt: "0" attributes: - - key: direction - value: - stringValue: received - key: object value: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "6" + - asInt: "0" attributes: - - key: direction - value: - stringValue: received - key: object value: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asDouble: "5.25" + - asInt: "0" attributes: - - key: direction - value: - stringValue: received - key: object value: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "5.15" + - asInt: "0" attributes: - - key: direction - value: - stringValue: received - key: object value: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "5.2" + - asInt: "0" attributes: - - key: direction - value: - stringValue: received - key: object value: stringValue: vmnic2 
startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "5.1" + - asInt: "0" attributes: - - key: direction - value: - stringValue: received - key: object value: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "5.45" + - asInt: "0" attributes: - - key: direction - value: - stringValue: received - key: object value: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asDouble: "2105.5" + - asInt: "769" attributes: - - key: direction - value: - stringValue: received - key: object value: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "2134.3" + - asInt: "773" attributes: - - key: direction - value: - stringValue: received - key: object value: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "2213.85" + - asInt: "927" attributes: - - key: direction - value: - stringValue: received - key: object value: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "2156.1" + - asInt: "980" attributes: - - key: direction - value: - stringValue: received - key: object value: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "2135.15" + - asInt: "864" attributes: - - key: direction - value: - stringValue: received - key: object value: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asDouble: "2599.6" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asDouble: "2735.6" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asDouble: "2972.45" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asDouble: "2730.2" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asDouble: "2723.2" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asDouble: "559.1" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asDouble: "650.45" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asDouble: "824.45" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asDouble: "619.9" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asDouble: "649.2" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "6000000" - timeUnixNano: 
"1000000" - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asDouble: "2040.5" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asDouble: "2085.15" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asDouble: "2148" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asDouble: "2110.3" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asDouble: "2074" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - unit: '{packets/sec}' - - description: The amount of data that was transmitted or received over the network by the host. - name: vcenter.host.network.throughput + unit: '{KiBy/s}' + scope: + name: otelcol/vcenterreceiver + version: latest + - resource: + attributes: + - key: vcenter.datacenter.name + value: + stringValue: Datacenter + - key: vcenter.host.name + value: + stringValue: esxi-111.europe-southeast1.gve.goog + scopeMetrics: + - metrics: + - description: The amount of CPU used by the host. 
+ name: vcenter.host.cpu.usage sum: aggregationTemporality: 2 dataPoints: - - asInt: "928" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "1120" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" + - asInt: "6107" + startTimeUnixNano: "1000000" timeUnixNano: "2000000" - - asInt: "1646" + unit: MHz + - description: The CPU utilization of the host system. + gauge: + dataPoints: + - asDouble: 6.542186227878476 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: vcenter.host.cpu.utilization + unit: '%' + - description: The latency of operations to the host system's disk. + gauge: + dataPoints: + - asInt: "781" attributes: - key: direction value: - stringValue: received + stringValue: read - key: object value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "1291" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "1058" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asInt: "570" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "768" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "1269" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "927" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "681" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - 
stringValue: vmnic2 - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asInt: "357" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "351" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "376" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "363" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "376" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asInt: "3475" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "2959" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "4924" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "4364" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "3058" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asInt: "3064" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "2537" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "4373" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "3746" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "2569" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: 
object - value: - stringValue: vmnic1 - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asInt: "411" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "422" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "551" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "617" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "488" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - unit: '{KiBy/s}' - - description: The sum of the data transmitted and received for all the NIC instances of the host. 
- name: vcenter.host.network.usage - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "4404" - attributes: - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "4079" - attributes: - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "6570" - attributes: - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "5655" - attributes: - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "4117" - attributes: - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asInt: "3634" - attributes: - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "3305" - attributes: - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "5642" - attributes: - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "4674" - attributes: - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "3251" - attributes: - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asInt: "0" - attributes: - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "0" - attributes: - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "0" - attributes: - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "0" - attributes: - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "0" - attributes: - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asInt: "0" - attributes: - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "0" - attributes: - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "0" - attributes: - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "0" - attributes: - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "0" - attributes: - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asInt: "769" - attributes: - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "773" - attributes: - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "927" - attributes: - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "980" - attributes: - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "864" - attributes: - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - unit: '{KiBy/s}' - scope: - name: otelcol/vcenterreceiver - version: latest - - resource: - attributes: - - key: 
vcenter.datacenter.name - value: - stringValue: Datacenter - - key: vcenter.host.name - value: - stringValue: esxi-111.europe-southeast1.gve.goog - scopeMetrics: - - metrics: - - description: The amount of CPU used by the host. - name: vcenter.host.cpu.usage - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "6107" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: MHz - - description: The CPU utilization of the host system. - gauge: - dataPoints: - - asDouble: 6.542186227878476 - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: vcenter.host.cpu.utilization - unit: '%' - - description: The latency of operations to the host system's disk. - gauge: - dataPoints: - - asInt: "781" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: "4000" - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "789" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: "4000" - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "645" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: "4000" - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "781" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: "4000" - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "782" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: "4000" - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asInt: "781" - attributes: - - key: direction - value: - stringValue: write - - key: object - value: - stringValue: "4000" - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "789" - attributes: - - key: direction - value: - stringValue: write - - key: object - value: - stringValue: "4000" - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "645" - attributes: - - key: direction - value: - stringValue: write - - key: object - value: - stringValue: "4000" - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "781" - attributes: - - key: direction - value: - stringValue: write - - key: object - value: - stringValue: "4000" - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "782" - attributes: - - key: direction - value: - stringValue: write - - key: object - value: - stringValue: "4000" - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - name: vcenter.host.disk.latency.avg - unit: ms - - description: Highest latency value across all disks used by the host. - gauge: - dataPoints: - - asInt: "899" - attributes: - - key: object - value: - stringValue: "4000" - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "899" - attributes: - - key: object - value: - stringValue: "4000" - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "905" - attributes: - - key: object - value: - stringValue: "4000" - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "1000" - attributes: - - key: object - value: - stringValue: "4000" - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "1002" - attributes: - - key: object - value: - stringValue: "4000" - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - name: vcenter.host.disk.latency.max - unit: ms - - description: Average number of kilobytes read from or written to the disk each second. 
- name: vcenter.host.disk.throughput - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "28" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "45" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "88" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "92" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "31" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asInt: "4" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.ATA_____DELLBOSS_VD_____________________________983baa25884a001000000000 - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "25" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.ATA_____DELLBOSS_VD_____________________________983baa25884a001000000000 - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "76" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.ATA_____DELLBOSS_VD_____________________________983baa25884a001000000000 - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "63" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.ATA_____DELLBOSS_VD_____________________________983baa25884a001000000000 - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.ATA_____DELLBOSS_VD_____________________________983baa25884a001000000000 - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asInt: "6" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E10E3E4D25C - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "6" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E10E3E4D25C - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "4" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E10E3E4D25C - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "8" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E10E3E4D25C - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "19" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E10E3E4D25C - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asInt: "5" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - 
stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E266CE4D25C - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "10" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E266CE4D25C - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "5" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E266CE4D25C - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "7" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E266CE4D25C - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "6" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E266CE4D25C - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asInt: "4" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____362E000121382500 - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____362E000121382500 - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "1" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____362E000121382500 - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "2" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____362E000121382500 - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____362E000121382500 - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asInt: "7" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3C2E000121382500 - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3C2E000121382500 - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3C2E000121382500 - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "4" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3C2E000121382500 - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "2" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3C2E000121382500 - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asInt: 
"0" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3D2E000121382500 - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3D2E000121382500 - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3D2E000121382500 - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "2" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3D2E000121382500 - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "1" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3D2E000121382500 - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____482E000121382500 - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____482E000121382500 - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____482E000121382500 - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "1" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____482E000121382500 - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____482E000121382500 - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____B32D000121382500 - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "2" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____B32D000121382500 - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____B32D000121382500 - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "2" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____B32D000121382500 - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: 
t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____B32D000121382500 - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____BD2D000121382500 - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____BD2D000121382500 - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____BD2D000121382500 - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____BD2D000121382500 - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: read - - key: object - value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____BD2D000121382500 - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asInt: "781" - attributes: - - key: direction - value: - stringValue: write - - key: object - value: - stringValue: "4000" - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "789" - attributes: - - key: direction - value: - stringValue: write - - key: object - value: - stringValue: "4000" - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "645" - attributes: - - key: direction - value: - stringValue: write - - key: object - value: - stringValue: "4000" - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "781" - attributes: - - key: direction - value: - stringValue: write - - key: object - value: - stringValue: "4000" - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "782" - attributes: - - key: direction - value: - stringValue: write - - key: object - value: - stringValue: "4000" - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - unit: '{KiBy/s}' - - description: The amount of memory the host system is using. - name: vcenter.host.memory.usage - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "140833" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: MiBy - - description: The percentage of the host system's memory capacity that is being utilized. - gauge: - dataPoints: - - asDouble: 17.948557824655133 - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: vcenter.host.memory.utilization - unit: '%' - - description: The number of packets transmitted and received, as measured over the most recent 20s interval. 
- name: vcenter.host.network.packet.count - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "55647" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "57376" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "64156" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "58814" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "57390" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asInt: "13316" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "14473" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "19662" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "15478" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "14458" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asInt: "116" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "114" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "113" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "112" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "120" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asInt: "105" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "103" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "104" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "102" - attributes: - - key: direction - value: - stringValue: received - - key: 
object - value: - stringValue: vmnic2 - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "109" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asInt: "42110" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "42686" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "44277" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "43122" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "42703" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asInt: "51992" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "1000000" - - asInt: "54712" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "2000000" - - asInt: "59449" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "3000000" - - asInt: "54604" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "4000000" - - asInt: "54464" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: "" - startTimeUnixNano: "6000000" - timeUnixNano: "5000000" - - asInt: "11182" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic0 + stringValue: "4000" startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "13009" + - asInt: "789" attributes: - key: direction value: - stringValue: transmitted + stringValue: read - key: object value: - stringValue: vmnic0 + stringValue: "4000" startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "16489" + - asInt: "645" attributes: - key: direction value: - stringValue: transmitted + stringValue: read - key: object value: - stringValue: vmnic0 + stringValue: "4000" startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "12398" + - asInt: "781" attributes: - key: direction value: - stringValue: transmitted + stringValue: read - key: object value: - stringValue: vmnic0 + stringValue: "4000" startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "12984" + - asInt: "782" attributes: - key: direction value: - stringValue: transmitted + stringValue: read - key: object value: - stringValue: vmnic0 + stringValue: "4000" startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asInt: "781" attributes: - key: direction value: - stringValue: transmitted + stringValue: write - key: object value: - stringValue: vmnic1 + stringValue: "4000" startTimeUnixNano: "6000000" timeUnixNano: 
"1000000" - - asInt: "0" + - asInt: "789" attributes: - key: direction value: - stringValue: transmitted + stringValue: write - key: object value: - stringValue: vmnic1 + stringValue: "4000" startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asInt: "645" attributes: - key: direction value: - stringValue: transmitted + stringValue: write - key: object value: - stringValue: vmnic1 + stringValue: "4000" startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asInt: "781" attributes: - key: direction value: - stringValue: transmitted + stringValue: write - key: object value: - stringValue: vmnic1 + stringValue: "4000" startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asInt: "782" attributes: - key: direction value: - stringValue: transmitted + stringValue: write - key: object value: - stringValue: vmnic1 + stringValue: "4000" startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + name: vcenter.host.disk.latency.avg + unit: ms + - description: Highest latency value across all disks used by the host. + gauge: + dataPoints: + - asInt: "899" attributes: - - key: direction - value: - stringValue: transmitted - key: object value: - stringValue: vmnic2 + stringValue: "4000" startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asInt: "899" attributes: - - key: direction - value: - stringValue: transmitted - key: object value: - stringValue: vmnic2 + stringValue: "4000" startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asInt: "905" attributes: - - key: direction - value: - stringValue: transmitted - key: object value: - stringValue: vmnic2 + stringValue: "4000" startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asInt: "1000" attributes: - - key: direction - value: - stringValue: transmitted - key: object value: - stringValue: vmnic2 + stringValue: "4000" startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asInt: "1002" attributes: - - key: direction - value: - stringValue: transmitted - key: object value: - stringValue: vmnic2 + stringValue: "4000" startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "40810" + name: vcenter.host.disk.latency.max + unit: ms + - description: Average number of kilobytes read from or written to the disk each second. 
+ name: vcenter.host.disk.throughput + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "28" attributes: - key: direction value: - stringValue: transmitted + stringValue: read - key: object value: - stringValue: vmnic3 + stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "41703" + - asInt: "45" attributes: - key: direction value: - stringValue: transmitted + stringValue: read - key: object value: - stringValue: vmnic3 + stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "42960" + - asInt: "88" attributes: - key: direction value: - stringValue: transmitted + stringValue: read - key: object value: - stringValue: vmnic3 + stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "42206" + - asInt: "92" attributes: - key: direction value: - stringValue: transmitted + stringValue: read - key: object value: - stringValue: vmnic3 + stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "41480" + - asInt: "31" attributes: - key: direction value: - stringValue: transmitted + stringValue: read - key: object value: - stringValue: vmnic3 + stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "5000000" - unit: '{packets/sec}' - - description: The rate of packet errors transmitted or received on the host network. - name: vcenter.host.network.packet.error.rate - gauge: - dataPoints: - - asDouble: "0" + - asInt: "4" attributes: - key: direction value: - stringValue: received + stringValue: read - key: object value: - stringValue: "" + stringValue: t10.ATA_____DELLBOSS_VD_____________________________983baa25884a001000000000 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "0" + - asInt: "25" attributes: - key: direction value: - stringValue: received + stringValue: read - key: object value: - stringValue: "" + stringValue: t10.ATA_____DELLBOSS_VD_____________________________983baa25884a001000000000 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "0" + - asInt: "76" attributes: - key: direction value: - stringValue: received + stringValue: read - key: object value: - stringValue: "" + stringValue: t10.ATA_____DELLBOSS_VD_____________________________983baa25884a001000000000 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "0" + - asInt: "63" attributes: - key: direction value: - stringValue: received + stringValue: read - key: object value: - stringValue: "" + stringValue: t10.ATA_____DELLBOSS_VD_____________________________983baa25884a001000000000 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: - stringValue: received + stringValue: read - key: object value: - stringValue: "" + stringValue: t10.ATA_____DELLBOSS_VD_____________________________983baa25884a001000000000 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asDouble: "0" + - asInt: "6" attributes: - key: direction value: - stringValue: received + stringValue: read - key: object value: - stringValue: vmnic0 + stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E10E3E4D25C startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "0" + - asInt: "6" attributes: - key: direction value: - stringValue: received + stringValue: read - key: object value: - stringValue: vmnic0 + stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E10E3E4D25C startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "0" + - asInt: "4" attributes: - key: direction value: - 
stringValue: received + stringValue: read - key: object value: - stringValue: vmnic0 + stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E10E3E4D25C startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "0" + - asInt: "8" attributes: - key: direction value: - stringValue: received + stringValue: read - key: object value: - stringValue: vmnic0 + stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E10E3E4D25C startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "0" + - asInt: "19" attributes: - key: direction value: - stringValue: received + stringValue: read - key: object value: - stringValue: vmnic0 + stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E10E3E4D25C startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asDouble: "0" + - asInt: "5" attributes: - key: direction value: - stringValue: received + stringValue: read - key: object value: - stringValue: vmnic1 + stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E266CE4D25C startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "0" + - asInt: "10" attributes: - key: direction value: - stringValue: received + stringValue: read - key: object value: - stringValue: vmnic1 + stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E266CE4D25C startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "0" + - asInt: "5" attributes: - key: direction value: - stringValue: received + stringValue: read - key: object value: - stringValue: vmnic1 + stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E266CE4D25C startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "0" + - asInt: "7" attributes: - key: direction value: - stringValue: received + stringValue: read - key: object value: - stringValue: vmnic1 + stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E266CE4D25C startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "0" + - asInt: "6" attributes: - key: direction value: - stringValue: received + stringValue: read - key: object value: - stringValue: vmnic1 + stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E266CE4D25C startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asDouble: "0" + - asInt: "4" attributes: - key: direction value: - stringValue: received + stringValue: read - key: object value: - stringValue: vmnic2 + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____362E000121382500 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: - stringValue: received + stringValue: read - key: object value: - stringValue: vmnic2 + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____362E000121382500 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "0" + - asInt: "1" attributes: - key: direction value: - stringValue: received + stringValue: read - key: object value: - stringValue: vmnic2 + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____362E000121382500 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "0" + - asInt: "2" attributes: - key: direction value: - stringValue: received + stringValue: read - key: object value: - stringValue: vmnic2 + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____362E000121382500 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: - stringValue: received + stringValue: 
read - key: object value: - stringValue: vmnic2 + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____362E000121382500 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asDouble: "0" + - asInt: "7" attributes: - key: direction value: - stringValue: received + stringValue: read - key: object value: - stringValue: vmnic3 + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3C2E000121382500 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: - stringValue: received + stringValue: read - key: object value: - stringValue: vmnic3 + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3C2E000121382500 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: - stringValue: received + stringValue: read - key: object value: - stringValue: vmnic3 + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3C2E000121382500 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "0" + - asInt: "4" attributes: - key: direction value: - stringValue: received + stringValue: read - key: object value: - stringValue: vmnic3 + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3C2E000121382500 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "0" + - asInt: "2" attributes: - key: direction value: - stringValue: received + stringValue: read - key: object value: - stringValue: vmnic3 + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3C2E000121382500 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: - stringValue: transmitted + stringValue: read - key: object value: - stringValue: "" + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3D2E000121382500 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: - stringValue: transmitted + stringValue: read - key: object value: - stringValue: "" + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3D2E000121382500 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: - stringValue: transmitted + stringValue: read - key: object value: - stringValue: "" + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3D2E000121382500 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "0" + - asInt: "2" attributes: - key: direction value: - stringValue: transmitted + stringValue: read - key: object value: - stringValue: "" + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3D2E000121382500 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "0" + - asInt: "1" attributes: - key: direction value: - stringValue: transmitted + stringValue: read - key: object value: - stringValue: "" + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3D2E000121382500 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: - stringValue: transmitted + stringValue: read - key: object value: - stringValue: vmnic0 + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____482E000121382500 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: - stringValue: transmitted + stringValue: read - key: object value: - 
stringValue: vmnic0 + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____482E000121382500 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: - stringValue: transmitted + stringValue: read - key: object value: - stringValue: vmnic0 + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____482E000121382500 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "0" + - asInt: "1" attributes: - key: direction value: - stringValue: transmitted + stringValue: read - key: object value: - stringValue: vmnic0 + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____482E000121382500 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: - stringValue: transmitted + stringValue: read - key: object value: - stringValue: vmnic0 + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____482E000121382500 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: - stringValue: transmitted + stringValue: read - key: object value: - stringValue: vmnic1 + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____B32D000121382500 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "0" + - asInt: "2" attributes: - key: direction value: - stringValue: transmitted + stringValue: read - key: object value: - stringValue: vmnic1 + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____B32D000121382500 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: - stringValue: transmitted + stringValue: read - key: object value: - stringValue: vmnic1 + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____B32D000121382500 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "0" + - asInt: "2" attributes: - key: direction value: - stringValue: transmitted + stringValue: read - key: object value: - stringValue: vmnic1 + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____B32D000121382500 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: - stringValue: transmitted + stringValue: read - key: object value: - stringValue: vmnic1 + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____B32D000121382500 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: - stringValue: transmitted + stringValue: read - key: object value: - stringValue: vmnic2 + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____BD2D000121382500 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: - stringValue: transmitted + stringValue: read - key: object value: - stringValue: vmnic2 + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____BD2D000121382500 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: - stringValue: transmitted + stringValue: read - key: object value: - stringValue: vmnic2 + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____BD2D000121382500 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: - stringValue: transmitted + stringValue: read - key: object value: - 
stringValue: vmnic2 + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____BD2D000121382500 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: - stringValue: transmitted + stringValue: read - key: object value: - stringValue: vmnic2 + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____BD2D000121382500 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asDouble: "0" + - asInt: "781" attributes: - key: direction value: - stringValue: transmitted + stringValue: write - key: object value: - stringValue: vmnic3 + stringValue: "4000" startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "0" + - asInt: "789" attributes: - key: direction value: - stringValue: transmitted + stringValue: write - key: object value: - stringValue: vmnic3 + stringValue: "4000" startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "0" + - asInt: "645" attributes: - key: direction value: - stringValue: transmitted + stringValue: write - key: object value: - stringValue: vmnic3 + stringValue: "4000" startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "0" + - asInt: "781" attributes: - key: direction value: - stringValue: transmitted + stringValue: write - key: object value: - stringValue: vmnic3 + stringValue: "4000" startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "0" + - asInt: "782" attributes: - key: direction value: - stringValue: transmitted + stringValue: write - key: object value: - stringValue: vmnic3 + stringValue: "4000" startTimeUnixNano: "6000000" timeUnixNano: "5000000" - unit: '{errors/sec}' - - description: The summation of packet errors on the host network. - name: vcenter.host.network.packet.errors + unit: '{KiBy/s}' + - description: The amount of memory the host system is using. + name: vcenter.host.memory.usage sum: aggregationTemporality: 2 dataPoints: - - asInt: "0" + - asInt: "140833" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: MiBy + - description: The percentage of the host system's memory capacity that is being utilized. + gauge: + dataPoints: + - asDouble: 17.948557824655133 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: vcenter.host.memory.utilization + unit: '%' + - description: The number of packets transmitted and received, as measured over the most recent 20s interval. 
+ name: vcenter.host.network.packet.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "55647" attributes: - key: direction value: @@ -5376,7 +3355,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asInt: "57376" attributes: - key: direction value: @@ -5386,7 +3365,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asInt: "64156" attributes: - key: direction value: @@ -5396,7 +3375,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asInt: "58814" attributes: - key: direction value: @@ -5406,7 +3385,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asInt: "57390" attributes: - key: direction value: @@ -5416,7 +3395,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asInt: "13316" attributes: - key: direction value: @@ -5426,7 +3405,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asInt: "14473" attributes: - key: direction value: @@ -5436,7 +3415,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asInt: "19662" attributes: - key: direction value: @@ -5446,7 +3425,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asInt: "15478" attributes: - key: direction value: @@ -5456,7 +3435,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asInt: "14458" attributes: - key: direction value: @@ -5466,7 +3445,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asInt: "116" attributes: - key: direction value: @@ -5476,7 +3455,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asInt: "114" attributes: - key: direction value: @@ -5486,7 +3465,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asInt: "113" attributes: - key: direction value: @@ -5496,7 +3475,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asInt: "112" attributes: - key: direction value: @@ -5506,7 +3485,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asInt: "120" attributes: - key: direction value: @@ -5516,7 +3495,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asInt: "105" attributes: - key: direction value: @@ -5526,7 +3505,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asInt: "103" attributes: - key: direction value: @@ -5536,7 +3515,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asInt: "104" attributes: - key: direction value: @@ -5546,7 +3525,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asInt: "102" attributes: - key: direction value: @@ -5556,7 +3535,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asInt: "109" attributes: - key: 
direction value: @@ -5566,7 +3545,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asInt: "42110" attributes: - key: direction value: @@ -5576,7 +3555,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asInt: "42686" attributes: - key: direction value: @@ -5586,7 +3565,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asInt: "44277" attributes: - key: direction value: @@ -5596,7 +3575,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asInt: "43122" attributes: - key: direction value: @@ -5606,7 +3585,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asInt: "42703" attributes: - key: direction value: @@ -5616,7 +3595,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asInt: "51992" attributes: - key: direction value: @@ -5626,7 +3605,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asInt: "54712" attributes: - key: direction value: @@ -5636,7 +3615,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asInt: "59449" attributes: - key: direction value: @@ -5646,7 +3625,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asInt: "54604" attributes: - key: direction value: @@ -5656,7 +3635,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asInt: "54464" attributes: - key: direction value: @@ -5666,7 +3645,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asInt: "11182" attributes: - key: direction value: @@ -5676,7 +3655,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asInt: "13009" attributes: - key: direction value: @@ -5686,7 +3665,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asInt: "16489" attributes: - key: direction value: @@ -5696,7 +3675,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asInt: "12398" attributes: - key: direction value: @@ -5706,7 +3685,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asInt: "12984" attributes: - key: direction value: @@ -5816,7 +3795,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asInt: "40810" attributes: - key: direction value: @@ -5826,7 +3805,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asInt: "41703" attributes: - key: direction value: @@ -5836,7 +3815,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asInt: "42960" attributes: - key: direction value: @@ -5846,7 +3825,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asInt: "42206" attributes: - key: direction value: @@ -5856,7 +3835,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" 
timeUnixNano: "4000000" - - asInt: "0" + - asInt: "41480" attributes: - key: direction value: @@ -5866,12 +3845,13 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - unit: '{errors}' - - description: The rate of packets transmitted or received across each physical NIC (network interface controller) instance on the host. - name: vcenter.host.network.packet.rate - gauge: + unit: '{packets/sec}' + - description: The summation of packet errors on the host network. + name: vcenter.host.network.packet.errors + sum: + aggregationTemporality: 2 dataPoints: - - asDouble: "2782.35" + - asInt: "0" attributes: - key: direction value: @@ -5881,7 +3861,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "2868.8" + - asInt: "0" attributes: - key: direction value: @@ -5891,7 +3871,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "3207.8" + - asInt: "0" attributes: - key: direction value: @@ -5901,7 +3881,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "2940.7" + - asInt: "0" attributes: - key: direction value: @@ -5911,7 +3891,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "2869.5" + - asInt: "0" attributes: - key: direction value: @@ -5921,7 +3901,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asDouble: "665.8" + - asInt: "0" attributes: - key: direction value: @@ -5931,7 +3911,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "723.65" + - asInt: "0" attributes: - key: direction value: @@ -5941,7 +3921,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "983.1" + - asInt: "0" attributes: - key: direction value: @@ -5951,7 +3931,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "773.9" + - asInt: "0" attributes: - key: direction value: @@ -5961,7 +3941,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "722.9" + - asInt: "0" attributes: - key: direction value: @@ -5971,7 +3951,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asDouble: "5.8" + - asInt: "0" attributes: - key: direction value: @@ -5981,7 +3961,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "5.7" + - asInt: "0" attributes: - key: direction value: @@ -5991,7 +3971,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "5.65" + - asInt: "0" attributes: - key: direction value: @@ -6001,7 +3981,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "5.6" + - asInt: "0" attributes: - key: direction value: @@ -6011,7 +3991,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "6" + - asInt: "0" attributes: - key: direction value: @@ -6021,7 +4001,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asDouble: "5.25" + - asInt: "0" attributes: - key: direction value: @@ -6031,7 +4011,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "5.15" 
+ - asInt: "0" attributes: - key: direction value: @@ -6041,7 +4021,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "5.2" + - asInt: "0" attributes: - key: direction value: @@ -6051,7 +4031,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "5.1" + - asInt: "0" attributes: - key: direction value: @@ -6061,7 +4041,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "5.45" + - asInt: "0" attributes: - key: direction value: @@ -6071,7 +4051,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asDouble: "2105.5" + - asInt: "0" attributes: - key: direction value: @@ -6081,7 +4061,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "2134.3" + - asInt: "0" attributes: - key: direction value: @@ -6091,7 +4071,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "2213.85" + - asInt: "0" attributes: - key: direction value: @@ -6101,7 +4081,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "2156.1" + - asInt: "0" attributes: - key: direction value: @@ -6111,7 +4091,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "2135.15" + - asInt: "0" attributes: - key: direction value: @@ -6121,7 +4101,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asDouble: "2599.6" + - asInt: "0" attributes: - key: direction value: @@ -6131,7 +4111,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "2735.6" + - asInt: "0" attributes: - key: direction value: @@ -6141,7 +4121,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "2972.45" + - asInt: "0" attributes: - key: direction value: @@ -6151,7 +4131,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "2730.2" + - asInt: "0" attributes: - key: direction value: @@ -6161,7 +4141,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "2723.2" + - asInt: "0" attributes: - key: direction value: @@ -6171,7 +4151,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asDouble: "559.1" + - asInt: "0" attributes: - key: direction value: @@ -6181,7 +4161,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "650.45" + - asInt: "0" attributes: - key: direction value: @@ -6191,7 +4171,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "824.45" + - asInt: "0" attributes: - key: direction value: @@ -6201,7 +4181,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "619.9" + - asInt: "0" attributes: - key: direction value: @@ -6211,7 +4191,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "649.2" + - asInt: "0" attributes: - key: direction value: @@ -6221,7 +4201,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ 
-6231,7 +4211,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -6241,7 +4221,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -6251,7 +4231,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -6261,7 +4241,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -6271,7 +4251,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -6281,7 +4261,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -6291,7 +4271,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -6301,7 +4281,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -6311,7 +4291,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -6321,7 +4301,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asDouble: "2040.5" + - asInt: "0" attributes: - key: direction value: @@ -6331,7 +4311,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asDouble: "2085.15" + - asInt: "0" attributes: - key: direction value: @@ -6341,7 +4321,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asDouble: "2148" + - asInt: "0" attributes: - key: direction value: @@ -6351,7 +4331,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asDouble: "2110.3" + - asInt: "0" attributes: - key: direction value: @@ -6361,7 +4341,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asDouble: "2074" + - asInt: "0" attributes: - key: direction value: @@ -6371,7 +4351,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - unit: '{packets/sec}' + unit: '{errors}' - description: The amount of data that was transmitted or received over the network by the host. name: vcenter.host.network.throughput sum: @@ -7500,256 +5480,6 @@ resourceMetrics: startTimeUnixNano: "2000000" timeUnixNano: "1000000" unit: '{packets/sec}' - - description: The rate of transmitted or received packets dropped by each vNIC (virtual network interface controller) on the virtual machine. 
- name: vcenter.vm.network.packet.drop.rate - gauge: - dataPoints: - - asDouble: "1" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: "" - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "1" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: "4000" - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "1" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "1" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "1" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "1" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "2" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: "" - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "2" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: "4000" - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "2" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "2" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "2" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "2" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - unit: '{packets/sec}' - - description: The rate of packets transmitted or received by each vNIC (virtual network interface controller) on the virtual machine. 
- name: vcenter.vm.network.packet.rate - gauge: - dataPoints: - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: "" - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: "4000" - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: "" - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: "4000" - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - unit: '{packets/sec}' - description: The amount of data that was transmitted or received over the network of the virtual machine. name: vcenter.vm.network.throughput sum: @@ -8152,281 +5882,31 @@ resourceMetrics: dataPoints: - asInt: "0" startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: KiBy - - description: The amount of memory that is used by the virtual machine. - name: vcenter.vm.memory.usage - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "163" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: MiBy - - description: The memory utilization of the VM. - gauge: - dataPoints: - - asDouble: 0.994873046875 - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: vcenter.vm.memory.utilization - unit: '%' - - description: The amount of packets that was received or transmitted over the instance's network. 
- name: vcenter.vm.network.packet.count - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "0" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: "" - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: "4000" - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: "" - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: "4000" - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asInt: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - unit: '{packets/sec}' - - description: The rate of transmitted or received packets dropped by each vNIC (virtual network interface controller) on the virtual machine. 
- name: vcenter.vm.network.packet.drop.rate - gauge: - dataPoints: - - asDouble: "1" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: "" - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "1" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: "4000" - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "1" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "1" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "1" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "1" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "2" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: "" - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "2" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: "4000" - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "2" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "2" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "2" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "2" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - unit: '{packets/sec}' - - description: The rate of packets transmitted or received by each vNIC (virtual network interface controller) on the virtual machine. - name: vcenter.vm.network.packet.rate + timeUnixNano: "2000000" + unit: KiBy + - description: The amount of memory that is used by the virtual machine. + name: vcenter.vm.memory.usage + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "163" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: MiBy + - description: The memory utilization of the VM. gauge: dataPoints: - - asDouble: "0" + - asDouble: 0.994873046875 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: vcenter.vm.memory.utilization + unit: '%' + - description: The amount of packets that was received or transmitted over the instance's network. 
+ name: vcenter.vm.network.packet.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" attributes: - key: direction value: @@ -8436,7 +5916,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "2000000" timeUnixNano: "1000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -8446,7 +5926,7 @@ resourceMetrics: stringValue: "4000" startTimeUnixNano: "2000000" timeUnixNano: "1000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -8456,7 +5936,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "2000000" timeUnixNano: "1000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -8466,7 +5946,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "2000000" timeUnixNano: "1000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -8476,7 +5956,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "2000000" timeUnixNano: "1000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -8486,7 +5966,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "2000000" timeUnixNano: "1000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -8496,7 +5976,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "2000000" timeUnixNano: "1000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -8506,7 +5986,7 @@ resourceMetrics: stringValue: "4000" startTimeUnixNano: "2000000" timeUnixNano: "1000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -8516,7 +5996,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "2000000" timeUnixNano: "1000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -8526,7 +6006,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "2000000" timeUnixNano: "1000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -8536,7 +6016,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "2000000" timeUnixNano: "1000000" - - asDouble: "0" + - asInt: "0" attributes: - key: direction value: @@ -9049,256 +6529,6 @@ resourceMetrics: startTimeUnixNano: "2000000" timeUnixNano: "1000000" unit: '{packets/sec}' - - description: The rate of transmitted or received packets dropped by each vNIC (virtual network interface controller) on the virtual machine. 
- name: vcenter.vm.network.packet.drop.rate - gauge: - dataPoints: - - asDouble: "1" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: "" - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "1" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: "4000" - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "1" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "1" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "1" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "1" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "2" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: "" - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "2" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: "4000" - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "2" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "2" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "2" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "2" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - unit: '{packets/sec}' - - description: The rate of packets transmitted or received by each vNIC (virtual network interface controller) on the virtual machine. 
- name: vcenter.vm.network.packet.rate - gauge: - dataPoints: - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: "" - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: "4000" - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: received - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: "" - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: "4000" - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic0 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic1 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic2 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - - asDouble: "0" - attributes: - - key: direction - value: - stringValue: transmitted - - key: object - value: - stringValue: vmnic3 - startTimeUnixNano: "2000000" - timeUnixNano: "1000000" - unit: '{packets/sec}' - description: The amount of data that was transmitted or received over the network of the virtual machine. name: vcenter.vm.network.throughput sum: diff --git a/receiver/vcenterreceiver/testdata/metrics/expected.yaml b/receiver/vcenterreceiver/testdata/metrics/expected.yaml index 9340103f2377..f52ae97074e3 100644 --- a/receiver/vcenterreceiver/testdata/metrics/expected.yaml +++ b/receiver/vcenterreceiver/testdata/metrics/expected.yaml @@ -1,6 +1,9 @@ resourceMetrics: - resource: attributes: + - key: vcenter.datacenter.name + value: + stringValue: Datacenter - key: vcenter.cluster.name value: stringValue: Cluster @@ -89,11 +92,23 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" unit: '{virtual_machines}' + - description: The number of virtual machine templates in the cluster. 
+ name: vcenter.cluster.vm_template.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{virtual_machine_templates}' scope: name: otelcol/vcenterreceiver version: latest - resource: attributes: + - key: vcenter.datacenter.name + value: + stringValue: Datacenter - key: vcenter.datastore.name value: stringValue: vsanDatastore @@ -132,6 +147,9 @@ resourceMetrics: version: latest - resource: attributes: + - key: vcenter.datacenter.name + value: + stringValue: Datacenter - key: vcenter.cluster.name value: stringValue: Cluster @@ -2579,6 +2597,9 @@ resourceMetrics: version: latest - resource: attributes: + - key: vcenter.datacenter.name + value: + stringValue: Datacenter - key: vcenter.host.name value: stringValue: esxi-111.europe-southeast1.gve.goog @@ -5023,6 +5044,9 @@ resourceMetrics: version: latest - resource: attributes: + - key: vcenter.datacenter.name + value: + stringValue: Datacenter - key: vcenter.cluster.name value: stringValue: Cluster @@ -5075,6 +5099,9 @@ resourceMetrics: version: latest - resource: attributes: + - key: vcenter.datacenter.name + value: + stringValue: Datacenter - key: vcenter.host.name value: stringValue: esxi-111.europe-southeast1.gve.goog @@ -5127,6 +5154,9 @@ resourceMetrics: version: latest - resource: attributes: + - key: vcenter.datacenter.name + value: + stringValue: Datacenter - key: vcenter.cluster.name value: stringValue: Cluster @@ -5316,6 +5346,14 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" unit: MiBy + - description: The memory utilization of the VM. + gauge: + dataPoints: + - asDouble: 0.994873046875 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: vcenter.vm.memory.utilization + unit: '%' - description: The amount of packets that was received or transmitted over the instance's network. name: vcenter.vm.network.packet.count sum: @@ -5621,12 +5659,63 @@ resourceMetrics: version: latest - resource: attributes: + - key: vcenter.datacenter.name + value: + stringValue: Datacenter + - key: vcenter.cluster.name + value: + stringValue: Cluster + - key: vcenter.host.name + value: + stringValue: esxi-27971.cf5e88ac.australia-southeast1.gve.goog + - key: vcenter.vm_template.id + value: + stringValue: 5000bbe0-993e-5813-c56a-198eaa62fb64 + - key: vcenter.vm_template.name + value: + stringValue: CentOS 7 Template + scopeMetrics: + - metrics: + - description: The amount of storage space used by the virtual machine. 
+ name: vcenter.vm.disk.usage + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "258847277056" + attributes: + - key: disk_state + value: + stringValue: available + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "16311648256" + attributes: + - key: disk_state + value: + stringValue: used + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + scope: + name: otelcol/vcenterreceiver + version: latest + - resource: + attributes: + - key: vcenter.datacenter.name + value: + stringValue: Datacenter - key: vcenter.cluster.name value: stringValue: Cluster - key: vcenter.host.name value: stringValue: esxi-27971.cf5e88ac.australia-southeast1.gve.goog + - key: vcenter.virtual_app.name + value: + stringValue: v-app-1 + - key: vcenter.virtual_app.inventory_path + value: + stringValue: /Datacenter/vm/v-app-1 - key: vcenter.vm.id value: stringValue: 5000bbe0-993e-5813-c56a-198eaa62fb62 @@ -5804,6 +5893,14 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" unit: MiBy + - description: The memory utilization of the VM. + gauge: + dataPoints: + - asDouble: 0.994873046875 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: vcenter.vm.memory.utilization + unit: '%' - description: The amount of packets that was received or transmitted over the instance's network. name: vcenter.vm.network.packet.count sum: @@ -6109,6 +6206,9 @@ resourceMetrics: version: latest - resource: attributes: + - key: vcenter.datacenter.name + value: + stringValue: Datacenter - key: vcenter.host.name value: stringValue: esxi-111.europe-southeast1.gve.goog @@ -6295,6 +6395,14 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" unit: MiBy + - description: The memory utilization of the VM. + gauge: + dataPoints: + - asDouble: 0.994873046875 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: vcenter.vm.memory.utilization + unit: '%' - description: The amount of packets that was received or transmitted over the instance's network. name: vcenter.vm.network.packet.count sum: From b1bbd587d4adc89f9cbc34754c4e67123d8e5283 Mon Sep 17 00:00:00 2001 From: Daniel Jaglowski Date: Wed, 8 May 2024 12:38:49 -0500 Subject: [PATCH 09/55] [chore][fileconsumer] Skip flaky TestFlushPeriodEOF on windows (#32946) See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/32715 This also adds a bit more debugging info for other tests which fail on the same expectation, since it's not very obvious what was expected vs actually found. 
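For reference, the skip follows the standard Go pattern of guarding on `runtime.GOOS` at the top of the test before any setup runs. A minimal, self-contained sketch of that pattern (the test name and skip message here are illustrative, not the exact ones in the diff below):

```go
package example

import (
	"runtime"
	"testing"
)

// Guard a test that is known to be flaky on Windows by skipping it early,
// before any temp files or goroutines are created.
func TestSkipOnWindows(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("skipping flaky test on Windows; see the tracking issue")
	}
	// ...the actual test logic would run here on other platforms...
}
```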
--- pkg/stanza/fileconsumer/internal/emittest/sink.go | 2 +- pkg/stanza/fileconsumer/internal/reader/reader_test.go | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/pkg/stanza/fileconsumer/internal/emittest/sink.go b/pkg/stanza/fileconsumer/internal/emittest/sink.go index 879bc0f2c36e..c836e41fd40b 100644 --- a/pkg/stanza/fileconsumer/internal/emittest/sink.go +++ b/pkg/stanza/fileconsumer/internal/emittest/sink.go @@ -119,7 +119,7 @@ func (s *Sink) ExpectTokens(t *testing.T, expected ...[]byte) { return } } - require.ElementsMatch(t, expected, actual) + require.ElementsMatch(t, expected, actual, fmt.Sprintf("expected: %v, actual: %v", expected, actual)) } func (s *Sink) ExpectCall(t *testing.T, expected []byte, attrs map[string]any) { diff --git a/pkg/stanza/fileconsumer/internal/reader/reader_test.go b/pkg/stanza/fileconsumer/internal/reader/reader_test.go index b26bd6a61c1f..97160e4ffc5f 100644 --- a/pkg/stanza/fileconsumer/internal/reader/reader_test.go +++ b/pkg/stanza/fileconsumer/internal/reader/reader_test.go @@ -6,6 +6,7 @@ package reader import ( "context" "fmt" + "runtime" "strings" "testing" "time" @@ -188,6 +189,9 @@ func TestFingerprintChangeSize(t *testing.T) { } func TestFlushPeriodEOF(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skipping test on Windows; See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/32715") + } tempDir := t.TempDir() temp := filetest.OpenTemp(t, tempDir) // Create a long enough initial token, so the scanner can't read the whole file at once From 59d08fcba1d71d92ee9abb3ede7f6c353717bb39 Mon Sep 17 00:00:00 2001 From: Curtis Robert Date: Thu, 9 May 2024 03:26:49 -0700 Subject: [PATCH 10/55] [chore][CI/CD][arm] Trigger arm runs on label (#32955) **Description:** I found in https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/32948 that the label `Run ARM` has been added, but the `build-and-test-arm / arm-unittest-matrix (pull_request) ` workflow is still skipped. This is because the `label` action does not trigger a retry. From [documentation](https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request): ``` if no activity types are specified, the workflow runs when a pull request is opened or reopened or when the head branch of the pull request is updated. ``` We need to specify that labelling issues should trigger the workflow to check to see if it needs to run again. I've copied the added section from the Windows workflow. I also added that we should only run on PRs against `main`. **Testing:** This PR shows it's working as it should now. 
Arm test was [originally skipped](https://github.com/open-telemetry/opentelemetry-collector-contrib/actions/runs/9009218559/job/24753003216?pr=32955), but after adding the label, tests [have started](https://github.com/open-telemetry/opentelemetry-collector-contrib/actions/runs/9009223570/job/24753017935?pr=32955) --- .github/workflows/build-and-test-arm.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/build-and-test-arm.yml b/.github/workflows/build-and-test-arm.yml index d61e7f92e983..2f356c696eb6 100644 --- a/.github/workflows/build-and-test-arm.yml +++ b/.github/workflows/build-and-test-arm.yml @@ -6,6 +6,9 @@ on: - 'v[0-9]+.[0-9]+.[0-9]+*' merge_group: pull_request: + types: [opened, synchronize, reopened, labeled, unlabeled] + branches: + - main env: TEST_RESULTS: testbed/tests/results/junit/results.xml # Make sure to exit early if cache segment download times out after 2 minutes. From 17f5711fefff4e2cc26cf3e6414c676d33a0ba61 Mon Sep 17 00:00:00 2001 From: Curtis Robert Date: Thu, 9 May 2024 08:13:31 -0700 Subject: [PATCH 11/55] [chore][receiver/splunkenterprise] Add header to README (#32956) **Description:** The readme for the Splunk Enterprise receiver does not currently have the autogenerated header. This was missing because `mdatagen` requires `` and `` to know where to insert the generated data. --- receiver/splunkenterprisereceiver/README.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/receiver/splunkenterprisereceiver/README.md b/receiver/splunkenterprisereceiver/README.md index 969b6eb0d694..91a0a712f292 100644 --- a/receiver/splunkenterprisereceiver/README.md +++ b/receiver/splunkenterprisereceiver/README.md @@ -1,5 +1,16 @@ # Splunk Enterprise Receiver + +| Status | | +| ------------- |-----------| +| Stability | [development]: metrics | +| Distributions | [] | +| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Areceiver%2Fsplunkenterprise%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Areceiver%2Fsplunkenterprise) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Areceiver%2Fsplunkenterprise%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Areceiver%2Fsplunkenterprise) | +| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@shalper2](https://www.github.com/shalper2), [@MovieStoreGuy](https://www.github.com/MovieStoreGuy), [@greatestusername](https://www.github.com/greatestusername) | + +[development]: https://github.com/open-telemetry/opentelemetry-collector#development + + The Splunk Enterprise Receiver is a pull based tool which enables the ingestion of performance metrics describing the operational status of a user's Splunk Enterprise deployment to an appropriate observability tool. It is designed to leverage several different data sources to gather these metrics including the [introspection api endpoint](https://docs.splunk.com/Documentation/Splunk/9.1.1/RESTREF/RESTintrospect) and serializing results from ad-hoc searches. 
Because of this, care must be taken by users when enabling metrics as running searches can effect your Splunk Enterprise Deployment and introspection may fail to report for Splunk From bcc9fe467b2b7937ac709fea15e3f65179aa11df Mon Sep 17 00:00:00 2001 From: Carson Ip Date: Thu, 9 May 2024 16:19:04 +0100 Subject: [PATCH 12/55] [exporter/elasticsearch] Replace go-elasticsearch BulkIndexer with go-docappender (#32359) **Description:** Replace go-elasticsearch BulkIndexer with go-docappender BulkIndexer for Flush function in preparation for reliability fixes. Maintain similar interface and implementation to go-elasticsearch BulkIndexer. Further changes to expose individual `docappender.BulkIndexer` instances are needed down the road but it is out of the scope of this PR. Implications of this change: - flush timeout is now enforced on client side - oversize payload special handling is now removed - go-docappender uses bulk request filterPath which means bulk response is smaller, less JSON parsing and lower CPU usage - document level retry debug logging is removed as retries are done transparently ~~Blocked by #32585~~ **Link to tracking Issue:** Fixes #32378 **Testing:** Integration test is passing --------- Co-authored-by: Vishal Raj --- .../elasticsearchexporter_go-docappender.yaml | 33 +++ cmd/configschema/go.mod | 11 + cmd/configschema/go.sum | 32 +++ cmd/otelcontribcol/go.mod | 11 + cmd/otelcontribcol/go.sum | 32 +++ exporter/elasticsearchexporter/README.md | 4 +- .../elasticsearch_bulk.go | 217 +++++++++++++----- .../elasticsearch_bulk_test.go | 163 +++++++++++++ exporter/elasticsearchexporter/go.mod | 15 +- exporter/elasticsearchexporter/go.sum | 48 +++- .../integrationtest/go.mod | 4 +- .../integrationtest/go.sum | 26 +-- .../elasticsearchexporter/logs_exporter.go | 15 +- .../logs_exporter_test.go | 2 +- .../elasticsearchexporter/trace_exporter.go | 13 +- .../traces_exporter_test.go | 2 +- go.mod | 11 + go.sum | 32 +++ 18 files changed, 555 insertions(+), 116 deletions(-) create mode 100644 .chloggen/elasticsearchexporter_go-docappender.yaml create mode 100644 exporter/elasticsearchexporter/elasticsearch_bulk_test.go diff --git a/.chloggen/elasticsearchexporter_go-docappender.yaml b/.chloggen/elasticsearchexporter_go-docappender.yaml new file mode 100644 index 000000000000..606ec783d555 --- /dev/null +++ b/.chloggen/elasticsearchexporter_go-docappender.yaml @@ -0,0 +1,33 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: elasticsearchexporter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Replace go-elasticsearch BulkIndexer with go-docappender + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [32378] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: | + Replace go-elasticsearch BulkIndexer with go-docappender bulk indexer, in preparation for future reliability fixes. 
+ As a result of this change, there are minor behavioral differences: + - flush timeout is now enforced on client side + - oversize payload special handling is now removed + - go-docappender uses bulk request filterPath which means bulk response is smaller, less JSON parsing and lower CPU usage + - document level retry debug logging is removed as retries are done transparently + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/cmd/configschema/go.mod b/cmd/configschema/go.mod index 77523712645a..4c4269eae03c 100644 --- a/cmd/configschema/go.mod +++ b/cmd/configschema/go.mod @@ -305,6 +305,7 @@ require ( github.com/apache/thrift v0.20.0 // indirect github.com/ardielle/ardielle-go v1.5.2 // indirect github.com/armon/go-metrics v0.4.1 // indirect + github.com/armon/go-radix v1.0.0 // indirect github.com/aws/aws-sdk-go v1.52.4 // indirect github.com/aws/aws-sdk-go-v2 v1.26.1 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 // indirect @@ -364,8 +365,13 @@ require ( github.com/eapache/go-resiliency v1.6.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect github.com/eapache/queue v1.1.0 // indirect + github.com/elastic/elastic-transport-go/v8 v8.5.0 // indirect + github.com/elastic/go-docappender/v2 v2.1.0 // indirect github.com/elastic/go-elasticsearch/v7 v7.17.10 // indirect + github.com/elastic/go-elasticsearch/v8 v8.13.1 // indirect github.com/elastic/go-structform v0.0.10 // indirect + github.com/elastic/go-sysinfo v1.7.1 // indirect + github.com/elastic/go-windows v1.0.1 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/envoyproxy/go-control-plane v0.12.0 // indirect github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect @@ -467,6 +473,7 @@ require ( github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect @@ -670,6 +677,9 @@ require ( github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect github.com/zeebo/xxh3 v1.0.2 // indirect + go.elastic.co/apm/module/apmzap/v2 v2.6.0 // indirect + go.elastic.co/apm/v2 v2.6.0 // indirect + go.elastic.co/fastjson v1.3.0 // indirect go.etcd.io/bbolt v1.3.10 // indirect go.mongodb.org/atlas v0.36.0 // indirect go.mongodb.org/mongo-driver v1.15.0 // indirect @@ -757,6 +767,7 @@ require ( gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect gopkg.in/zorkian/go-datadog-api.v2 v2.30.0 // indirect + howett.net/plist v1.0.0 // indirect k8s.io/api v0.29.3 // indirect k8s.io/apimachinery v0.29.3 // indirect k8s.io/client-go v0.29.3 // indirect diff --git a/cmd/configschema/go.sum b/cmd/configschema/go.sum index 1fbc4990342f..8a2b1ba43e42 100644 --- a/cmd/configschema/go.sum +++ b/cmd/configschema/go.sum @@ -995,6 +995,7 @@ github.com/armon/go-metrics 
v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= @@ -1247,10 +1248,21 @@ github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1 github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/elastic/elastic-transport-go/v8 v8.5.0 h1:v5membAl7lvQgBTexPRDBO/RdnlQX+FM9fUVDyXxvH0= +github.com/elastic/elastic-transport-go/v8 v8.5.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= +github.com/elastic/go-docappender/v2 v2.1.0 h1:Ct/C2J9qgKue8kQumUDZAi/AB2F+wlrIVOf2TH4afPA= +github.com/elastic/go-docappender/v2 v2.1.0/go.mod h1:oHi6MsHriWaG8W6T9iyJ/PkEo2+182HIzq+0RRAzzgA= github.com/elastic/go-elasticsearch/v7 v7.17.10 h1:TCQ8i4PmIJuBunvBS6bwT2ybzVFxxUhhltAs3Gyu1yo= github.com/elastic/go-elasticsearch/v7 v7.17.10/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4= +github.com/elastic/go-elasticsearch/v8 v8.13.1 h1:du5F8IzUUyCkzxyHdrO9AtopcG95I/qwi2WK8Kf1xlg= +github.com/elastic/go-elasticsearch/v8 v8.13.1/go.mod h1:DIn7HopJs4oZC/w0WoJR13uMUxtHeq92eI5bqv5CRfI= github.com/elastic/go-structform v0.0.10 h1:oy08o/Ih2hHTkNcRY/1HhaYvIp5z6t8si8gnCJPDo1w= github.com/elastic/go-structform v0.0.10/go.mod h1:CZWf9aIRYY5SuKSmOhtXScE5uQiLZNqAFnwKR4OrIM4= +github.com/elastic/go-sysinfo v1.7.1 h1:Wx4DSARcKLllpKT2TnFVdSUJOsybqMYCNQZq1/wO+s0= +github.com/elastic/go-sysinfo v1.7.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= +github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= +github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0= +github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -1750,6 +1762,7 @@ github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh6 github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.3.0/go.mod 
h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -1758,6 +1771,8 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGw github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= +github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= +github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= @@ -2133,6 +2148,7 @@ github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdD github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= @@ -2375,6 +2391,16 @@ github.com/zorkian/go-datadog-api v2.30.0+incompatible h1:R4ryGocppDqZZbnNc5EDR8 github.com/zorkian/go-datadog-api v2.30.0+incompatible/go.mod h1:PkXwHX9CUQa/FpB9ZwAD45N1uhCW4MT/Wj7m36PbKss= go.einride.tech/aip v0.67.1 h1:d/4TW92OxXBngkSOwWS2CH5rez869KpKMaN44mdxkFI= go.einride.tech/aip v0.67.1/go.mod h1:ZGX4/zKw8dcgzdLsrvpOOGxfxI2QSk12SlP7d6c0/XI= +go.elastic.co/apm/module/apmelasticsearch/v2 v2.6.0 h1:ukMcwyMaDXsS1dRK2qRYXT2AsfwaUy74TOOYCqkWJow= +go.elastic.co/apm/module/apmelasticsearch/v2 v2.6.0/go.mod h1:YpfiTTrqX5LB/CKBwX89oDCBAxuLJTFv40gcfxJyehM= +go.elastic.co/apm/module/apmhttp/v2 v2.6.0 h1:s8UeNFQmVBCNd4eoz7KDD9rEFhQC0HeUFXz3z9gpAmQ= +go.elastic.co/apm/module/apmhttp/v2 v2.6.0/go.mod h1:D0GLppLuI0Ddwvtl595GUxRgn6Z8L5KaDFVMv2H3GK0= +go.elastic.co/apm/module/apmzap/v2 v2.6.0 h1:R/iVORzGu3F9uM43iEVHD0nwiRo59O0bIXdayKsgayQ= +go.elastic.co/apm/module/apmzap/v2 v2.6.0/go.mod h1:B3i/8xRkqLgi6zNuV+Bp7Pt4cutaOObvrVSa7wUTAPw= +go.elastic.co/apm/v2 v2.6.0 h1:VieBMLQFtXua2YxpYxaSdYGnmmxhLT46gosI5yErJgY= +go.elastic.co/apm/v2 v2.6.0/go.mod h1:33rOXgtHwbgZcDgi6I/GtCSMZQqgxkHC0IQT3gudKvo= +go.elastic.co/fastjson v1.3.0 h1:hJO3OsYIhiqiT4Fgu0ZxAECnKASbwgiS+LMW5oCopKs= +go.elastic.co/fastjson v1.3.0/go.mod h1:K9vDh7O0ODsVKV2B5e2XYLY277QZaCbB3tS1SnARvko= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0= @@ -2800,6 +2826,7 @@ golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2807,6 +2834,7 @@ golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -3360,6 +3388,7 @@ gopkg.in/square/go-jose.v2 v2.4.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -3390,6 +3419,9 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= +howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= +howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s= k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= diff --git a/cmd/otelcontribcol/go.mod b/cmd/otelcontribcol/go.mod index c0cd3eafaab3..95e57015be97 100644 --- a/cmd/otelcontribcol/go.mod +++ b/cmd/otelcontribcol/go.mod @@ -372,6 +372,7 @@ require ( github.com/apache/thrift v0.20.0 // indirect github.com/ardielle/ardielle-go v1.5.2 // indirect github.com/armon/go-metrics v0.4.1 // indirect + github.com/armon/go-radix v1.0.0 // indirect 
github.com/aws/aws-sdk-go v1.52.4 // indirect github.com/aws/aws-sdk-go-v2 v1.26.1 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 // indirect @@ -431,8 +432,13 @@ require ( github.com/eapache/go-resiliency v1.6.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect github.com/eapache/queue v1.1.0 // indirect + github.com/elastic/elastic-transport-go/v8 v8.5.0 // indirect + github.com/elastic/go-docappender/v2 v2.1.0 // indirect github.com/elastic/go-elasticsearch/v7 v7.17.10 // indirect + github.com/elastic/go-elasticsearch/v8 v8.13.1 // indirect github.com/elastic/go-structform v0.0.10 // indirect + github.com/elastic/go-sysinfo v1.7.1 // indirect + github.com/elastic/go-windows v1.0.1 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/envoyproxy/go-control-plane v0.12.0 // indirect github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect @@ -537,6 +543,7 @@ require ( github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect @@ -717,6 +724,9 @@ require ( github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect github.com/zeebo/xxh3 v1.0.2 // indirect + go.elastic.co/apm/module/apmzap/v2 v2.6.0 // indirect + go.elastic.co/apm/v2 v2.6.0 // indirect + go.elastic.co/fastjson v1.3.0 // indirect go.etcd.io/bbolt v1.3.10 // indirect go.mongodb.org/atlas v0.36.0 // indirect go.mongodb.org/mongo-driver v1.15.0 // indirect @@ -783,6 +793,7 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect gopkg.in/zorkian/go-datadog-api.v2 v2.30.0 // indirect + howett.net/plist v1.0.0 // indirect k8s.io/api v0.29.3 // indirect k8s.io/apimachinery v0.29.3 // indirect k8s.io/client-go v0.29.3 // indirect diff --git a/cmd/otelcontribcol/go.sum b/cmd/otelcontribcol/go.sum index 3e90b0b5a599..781d867d8bca 100644 --- a/cmd/otelcontribcol/go.sum +++ b/cmd/otelcontribcol/go.sum @@ -996,6 +996,7 @@ github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= @@ -1248,10 +1249,21 @@ github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1 github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/elastic/elastic-transport-go/v8 v8.5.0 h1:v5membAl7lvQgBTexPRDBO/RdnlQX+FM9fUVDyXxvH0= 
+github.com/elastic/elastic-transport-go/v8 v8.5.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= +github.com/elastic/go-docappender/v2 v2.1.0 h1:Ct/C2J9qgKue8kQumUDZAi/AB2F+wlrIVOf2TH4afPA= +github.com/elastic/go-docappender/v2 v2.1.0/go.mod h1:oHi6MsHriWaG8W6T9iyJ/PkEo2+182HIzq+0RRAzzgA= github.com/elastic/go-elasticsearch/v7 v7.17.10 h1:TCQ8i4PmIJuBunvBS6bwT2ybzVFxxUhhltAs3Gyu1yo= github.com/elastic/go-elasticsearch/v7 v7.17.10/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4= +github.com/elastic/go-elasticsearch/v8 v8.13.1 h1:du5F8IzUUyCkzxyHdrO9AtopcG95I/qwi2WK8Kf1xlg= +github.com/elastic/go-elasticsearch/v8 v8.13.1/go.mod h1:DIn7HopJs4oZC/w0WoJR13uMUxtHeq92eI5bqv5CRfI= github.com/elastic/go-structform v0.0.10 h1:oy08o/Ih2hHTkNcRY/1HhaYvIp5z6t8si8gnCJPDo1w= github.com/elastic/go-structform v0.0.10/go.mod h1:CZWf9aIRYY5SuKSmOhtXScE5uQiLZNqAFnwKR4OrIM4= +github.com/elastic/go-sysinfo v1.7.1 h1:Wx4DSARcKLllpKT2TnFVdSUJOsybqMYCNQZq1/wO+s0= +github.com/elastic/go-sysinfo v1.7.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= +github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= +github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0= +github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -1749,6 +1761,7 @@ github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh6 github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -1757,6 +1770,8 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGw github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= +github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= +github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= @@ -2136,6 +2151,7 @@ github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdD github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/procfs 
v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= @@ -2379,6 +2395,16 @@ github.com/zorkian/go-datadog-api v2.30.0+incompatible h1:R4ryGocppDqZZbnNc5EDR8 github.com/zorkian/go-datadog-api v2.30.0+incompatible/go.mod h1:PkXwHX9CUQa/FpB9ZwAD45N1uhCW4MT/Wj7m36PbKss= go.einride.tech/aip v0.67.1 h1:d/4TW92OxXBngkSOwWS2CH5rez869KpKMaN44mdxkFI= go.einride.tech/aip v0.67.1/go.mod h1:ZGX4/zKw8dcgzdLsrvpOOGxfxI2QSk12SlP7d6c0/XI= +go.elastic.co/apm/module/apmelasticsearch/v2 v2.6.0 h1:ukMcwyMaDXsS1dRK2qRYXT2AsfwaUy74TOOYCqkWJow= +go.elastic.co/apm/module/apmelasticsearch/v2 v2.6.0/go.mod h1:YpfiTTrqX5LB/CKBwX89oDCBAxuLJTFv40gcfxJyehM= +go.elastic.co/apm/module/apmhttp/v2 v2.6.0 h1:s8UeNFQmVBCNd4eoz7KDD9rEFhQC0HeUFXz3z9gpAmQ= +go.elastic.co/apm/module/apmhttp/v2 v2.6.0/go.mod h1:D0GLppLuI0Ddwvtl595GUxRgn6Z8L5KaDFVMv2H3GK0= +go.elastic.co/apm/module/apmzap/v2 v2.6.0 h1:R/iVORzGu3F9uM43iEVHD0nwiRo59O0bIXdayKsgayQ= +go.elastic.co/apm/module/apmzap/v2 v2.6.0/go.mod h1:B3i/8xRkqLgi6zNuV+Bp7Pt4cutaOObvrVSa7wUTAPw= +go.elastic.co/apm/v2 v2.6.0 h1:VieBMLQFtXua2YxpYxaSdYGnmmxhLT46gosI5yErJgY= +go.elastic.co/apm/v2 v2.6.0/go.mod h1:33rOXgtHwbgZcDgi6I/GtCSMZQqgxkHC0IQT3gudKvo= +go.elastic.co/fastjson v1.3.0 h1:hJO3OsYIhiqiT4Fgu0ZxAECnKASbwgiS+LMW5oCopKs= +go.elastic.co/fastjson v1.3.0/go.mod h1:K9vDh7O0ODsVKV2B5e2XYLY277QZaCbB3tS1SnARvko= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0= @@ -2806,6 +2832,7 @@ golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2813,6 +2840,7 @@ golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -3367,6 +3395,7 @@ gopkg.in/square/go-jose.v2 v2.4.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -3397,6 +3426,9 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= +howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= +howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s= k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= diff --git a/exporter/elasticsearchexporter/README.md b/exporter/elasticsearchexporter/README.md index 0052204987cd..7a6d815475c5 100644 --- a/exporter/elasticsearchexporter/README.md +++ b/exporter/elasticsearchexporter/README.md @@ -22,7 +22,7 @@ This exporter supports sending OpenTelemetry logs and traces to [Elasticsearch]( [ID](https://www.elastic.co/guide/en/cloud/current/ec-cloud-id.html) of the Elastic Cloud Cluster to publish events to. The `cloudid` can be used instead of `endpoints`. -- `num_workers` (optional): Number of workers publishing bulk requests concurrently. +- `num_workers` (default=runtime.NumCPU()): Number of workers publishing bulk requests concurrently. - `index` (DEPRECATED, please use `logs_index` for logs, `traces_index` for traces): The [index](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices.html) or [data stream](https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html) @@ -51,7 +51,7 @@ This exporter supports sending OpenTelemetry logs and traces to [Elasticsearch]( - `date_format`(default=`%Y.%m.%d`): Time format (based on strftime) to generate the second part of the Index name. - `pipeline` (optional): Optional [Ingest pipeline](https://www.elastic.co/guide/en/elasticsearch/reference/current/ingest.html) ID used for processing documents published by the exporter. - `flush`: Event bulk indexer buffer flush settings - - `bytes` (default=5242880): Write buffer flush size limit. + - `bytes` (default=5000000): Write buffer flush size limit. - `interval` (default=30s): Write buffer flush time limit. 
- `retry`: Elasticsearch bulk request retry settings - `enabled` (default=true): Enable/Disable request retry on error. Failed requests are retried with exponential backoff. diff --git a/exporter/elasticsearchexporter/elasticsearch_bulk.go b/exporter/elasticsearchexporter/elasticsearch_bulk.go index 150ca0f92aa9..e52a4cd5d232 100644 --- a/exporter/elasticsearchexporter/elasticsearch_bulk.go +++ b/exporter/elasticsearchexporter/elasticsearch_bulk.go @@ -12,11 +12,14 @@ import ( "fmt" "io" "net/http" + "runtime" + "sync" + "sync/atomic" "time" "github.com/cenkalti/backoff/v4" + "github.com/elastic/go-docappender/v2" elasticsearch7 "github.com/elastic/go-elasticsearch/v7" - esutil7 "github.com/elastic/go-elasticsearch/v7/esutil" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/sanitize" @@ -24,10 +27,10 @@ import ( type esClientCurrent = elasticsearch7.Client type esConfigCurrent = elasticsearch7.Config -type esBulkIndexerCurrent = esutil7.BulkIndexer -type esBulkIndexerItem = esutil7.BulkIndexerItem -type esBulkIndexerResponseItem = esutil7.BulkIndexerResponseItem +type esBulkIndexerCurrent = bulkIndexerPool + +type esBulkIndexerItem = docappender.BulkIndexerItem // clientLogger implements the estransport.Logger interface // that is required by the Elasticsearch client for logging. @@ -136,22 +139,6 @@ func newTransport(config *Config, tlsCfg *tls.Config) *http.Transport { return transport } -func newBulkIndexer(logger *zap.Logger, client *elasticsearch7.Client, config *Config) (esBulkIndexerCurrent, error) { - // TODO: add debug logger - return esutil7.NewBulkIndexer(esutil7.BulkIndexerConfig{ - NumWorkers: config.NumWorkers, - FlushBytes: config.Flush.Bytes, - FlushInterval: config.Flush.Interval, - Client: client, - Pipeline: config.Pipeline, - Timeout: config.Timeout, - - OnError: func(_ context.Context, err error) { - logger.Error(fmt.Sprintf("Bulk indexer error: %v", err)) - }, - }) -} - func createElasticsearchBackoffFunc(config *RetrySettings) func(int) time.Duration { if !config.Enabled { return nil @@ -175,52 +162,160 @@ func createElasticsearchBackoffFunc(config *RetrySettings) func(int) time.Durati } } -func shouldRetryEvent(status int, retryOnStatus []int) bool { - for _, retryable := range retryOnStatus { - if status == retryable { - return true +func pushDocuments(ctx context.Context, index string, document []byte, bulkIndexer *esBulkIndexerCurrent) error { + return bulkIndexer.Add(ctx, index, bytes.NewReader(document)) +} + +func newBulkIndexer(logger *zap.Logger, client *elasticsearch7.Client, config *Config) (*esBulkIndexerCurrent, error) { + numWorkers := config.NumWorkers + if numWorkers == 0 { + numWorkers = runtime.NumCPU() + } + + flushInterval := config.Flush.Interval + if flushInterval == 0 { + flushInterval = 30 * time.Second + } + + flushBytes := config.Flush.Bytes + if flushBytes == 0 { + flushBytes = 5e+6 + } + + var maxDocRetry int + if config.Retry.Enabled { + // max_requests includes initial attempt + // See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/32344 + maxDocRetry = config.Retry.MaxRequests - 1 + } + + pool := &bulkIndexerPool{ + wg: sync.WaitGroup{}, + items: make(chan esBulkIndexerItem, config.NumWorkers), + stats: bulkIndexerStats{}, + } + pool.wg.Add(numWorkers) + + for i := 0; i < numWorkers; i++ { + bi, err := docappender.NewBulkIndexer(docappender.BulkIndexerConfig{ + Client: client, + MaxDocumentRetries: maxDocRetry, + Pipeline: config.Pipeline, + RetryOnDocumentStatus: 
config.Retry.RetryOnStatus, + }) + if err != nil { + return nil, err } + w := worker{ + indexer: bi, + items: pool.items, + flushInterval: flushInterval, + flushTimeout: config.Timeout, + flushBytes: flushBytes, + logger: logger, + stats: &pool.stats, + } + go func() { + defer pool.wg.Done() + w.run() + }() } - return false + return pool, nil +} + +type bulkIndexerStats struct { + docsIndexed atomic.Int64 } -func pushDocuments(ctx context.Context, logger *zap.Logger, index string, document []byte, bulkIndexer esBulkIndexerCurrent, maxAttempts int, retryOnStatus []int) error { - attempts := 1 - body := bytes.NewReader(document) - item := esBulkIndexerItem{Action: createAction, Index: index, Body: body} - // Setup error handler. The handler handles the per item response status based on the - // selective ACKing in the bulk response. - item.OnFailure = func(ctx context.Context, item esBulkIndexerItem, resp esBulkIndexerResponseItem, err error) { - switch { - case attempts < maxAttempts && shouldRetryEvent(resp.Status, retryOnStatus): - logger.Debug("Retrying to index", - zap.String("name", index), - zap.Int("attempt", attempts), - zap.Int("status", resp.Status), - zap.NamedError("reason", err)) - - attempts++ - _, _ = body.Seek(0, io.SeekStart) - _ = bulkIndexer.Add(ctx, item) - - case resp.Status == 0 && err != nil: - // Encoding error. We didn't even attempt to send the event - logger.Error("Drop docs: failed to add docs to the bulk request buffer.", - zap.NamedError("reason", err)) - - case err != nil: - logger.Error("Drop docs: failed to index", - zap.String("name", index), - zap.Int("attempt", attempts), - zap.Int("status", resp.Status), - zap.NamedError("reason", err)) - - default: - logger.Error(fmt.Sprintf("Drop docs: failed to index: %#v", resp.Error), - zap.Int("attempt", attempts), - zap.Int("status", resp.Status)) +type bulkIndexerPool struct { + items chan esBulkIndexerItem + wg sync.WaitGroup + stats bulkIndexerStats +} + +// Add adds an item to the bulk indexer pool. +// +// Adding an item after a call to Close() will panic. +func (p *bulkIndexerPool) Add(ctx context.Context, index string, document io.WriterTo) error { + item := esBulkIndexerItem{ + Index: index, + Body: document, + } + select { + case <-ctx.Done(): + return ctx.Err() + case p.items <- item: + return nil + } +} + +// Close closes the items channel and waits for the workers to drain it. +func (p *bulkIndexerPool) Close(ctx context.Context) error { + close(p.items) + doneCh := make(chan struct{}) + go func() { + p.wg.Wait() + close(doneCh) + }() + select { + case <-ctx.Done(): + return ctx.Err() + case <-doneCh: + return nil + } +} + +type worker struct { + indexer *docappender.BulkIndexer + items <-chan esBulkIndexerItem + flushInterval time.Duration + flushTimeout time.Duration + flushBytes int + + stats *bulkIndexerStats + + logger *zap.Logger +} + +func (w *worker) run() { + flushTick := time.NewTicker(w.flushInterval) + defer flushTick.Stop() + for { + select { + case item, ok := <-w.items: + // if channel is closed, flush and return + if !ok { + w.flush() + return + } + + if err := w.indexer.Add(item); err != nil { + w.logger.Error("error adding item to bulk indexer", zap.Error(err)) + } + + // w.indexer.Len() can be either compressed or uncompressed bytes + if w.indexer.Len() >= w.flushBytes { + w.flush() + flushTick.Reset(w.flushInterval) + } + case <-flushTick.C: + // bulk indexer needs to be flushed every flush interval because + // there may be pending bytes in bulk indexer buffer due to e.g. 
document level 429 + w.flush() } } +} - return bulkIndexer.Add(ctx, item) +func (w *worker) flush() { + ctx, cancel := context.WithTimeout(context.Background(), w.flushTimeout) + defer cancel() + stat, err := w.indexer.Flush(ctx) + w.stats.docsIndexed.Add(stat.Indexed) + if err != nil { + w.logger.Error("bulk indexer flush error", zap.Error(err)) + } + for _, resp := range stat.FailedDocs { + w.logger.Error(fmt.Sprintf("Drop docs: failed to index: %#v", resp.Error), + zap.Int("status", resp.Status)) + } } diff --git a/exporter/elasticsearchexporter/elasticsearch_bulk_test.go b/exporter/elasticsearchexporter/elasticsearch_bulk_test.go new file mode 100644 index 000000000000..020d29fae623 --- /dev/null +++ b/exporter/elasticsearchexporter/elasticsearch_bulk_test.go @@ -0,0 +1,163 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package elasticsearchexporter + +import ( + "context" + "errors" + "io" + "net/http" + "strings" + "testing" + "time" + + "github.com/elastic/go-elasticsearch/v7" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "go.uber.org/zap/zaptest/observer" +) + +var defaultRoundTripFunc = func(*http.Request) (*http.Response, error) { + return &http.Response{ + Body: io.NopCloser(strings.NewReader("{}")), + }, nil +} + +type mockTransport struct { + RoundTripFunc func(*http.Request) (*http.Response, error) +} + +func (t *mockTransport) RoundTrip(req *http.Request) (*http.Response, error) { + if t.RoundTripFunc == nil { + return defaultRoundTripFunc(req) + } + return t.RoundTripFunc(req) +} + +const successResp = `{ + "took": 30, + "errors": false, + "items": [ + { + "create": { + "_index": "foo", + "status": 201 + } + } + ] +}` + +func TestBulkIndexer_flushOnClose(t *testing.T) { + cfg := Config{NumWorkers: 1, Flush: FlushSettings{Interval: time.Hour, Bytes: 2 << 30}} + client, err := elasticsearch.NewClient(elasticsearch.Config{Transport: &mockTransport{ + RoundTripFunc: func(*http.Request) (*http.Response, error) { + return &http.Response{ + Header: http.Header{"X-Elastic-Product": []string{"Elasticsearch"}}, + Body: io.NopCloser(strings.NewReader(successResp)), + }, nil + }, + }}) + require.NoError(t, err) + bulkIndexer, err := newBulkIndexer(zap.NewNop(), client, &cfg) + require.NoError(t, err) + assert.NoError(t, bulkIndexer.Add(context.Background(), "foo", strings.NewReader(`{"foo": "bar"}`))) + assert.NoError(t, bulkIndexer.Close(context.Background())) + assert.Equal(t, int64(1), bulkIndexer.stats.docsIndexed.Load()) +} + +func TestBulkIndexer_flush(t *testing.T) { + tests := []struct { + name string + config Config + }{ + { + name: "flush.bytes", + config: Config{NumWorkers: 1, Flush: FlushSettings{Interval: time.Hour, Bytes: 1}}, + }, + { + name: "flush.interval", + config: Config{NumWorkers: 1, Flush: FlushSettings{Interval: 50 * time.Millisecond, Bytes: 2 << 30}}, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + client, err := elasticsearch.NewClient(elasticsearch.Config{Transport: &mockTransport{ + RoundTripFunc: func(*http.Request) (*http.Response, error) { + return &http.Response{ + Header: http.Header{"X-Elastic-Product": []string{"Elasticsearch"}}, + Body: io.NopCloser(strings.NewReader(successResp)), + }, nil + }, + }}) + require.NoError(t, err) + bulkIndexer, err := newBulkIndexer(zap.NewNop(), client, &tt.config) + require.NoError(t, err) + assert.NoError(t, 
bulkIndexer.Add(context.Background(), "foo", strings.NewReader(`{"foo": "bar"}`))) + // should flush + time.Sleep(100 * time.Millisecond) + assert.Equal(t, int64(1), bulkIndexer.stats.docsIndexed.Load()) + assert.NoError(t, bulkIndexer.Close(context.Background())) + }) + } +} + +func TestBulkIndexer_flush_error(t *testing.T) { + tests := []struct { + name string + roundTripFunc func(*http.Request) (*http.Response, error) + }{ + { + name: "500", + roundTripFunc: func(*http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: 500, + Header: http.Header{"X-Elastic-Product": []string{"Elasticsearch"}}, + Body: io.NopCloser(strings.NewReader("error")), + }, nil + }, + }, + { + name: "429", + roundTripFunc: func(*http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: 429, + Header: http.Header{"X-Elastic-Product": []string{"Elasticsearch"}}, + Body: io.NopCloser(strings.NewReader("error")), + }, nil + }, + }, + { + name: "transport error", + roundTripFunc: func(*http.Request) (*http.Response, error) { + return nil, errors.New("transport error") + }, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + cfg := Config{NumWorkers: 1, Flush: FlushSettings{Interval: time.Hour, Bytes: 1}} + client, err := elasticsearch.NewClient(elasticsearch.Config{Transport: &mockTransport{ + RoundTripFunc: tt.roundTripFunc, + }}) + require.NoError(t, err) + core, observed := observer.New(zap.NewAtomicLevelAt(zapcore.DebugLevel)) + bulkIndexer, err := newBulkIndexer(zap.New(core), client, &cfg) + require.NoError(t, err) + assert.NoError(t, bulkIndexer.Add(context.Background(), "foo", strings.NewReader(`{"foo": "bar"}`))) + // should flush + time.Sleep(100 * time.Millisecond) + assert.Equal(t, int64(0), bulkIndexer.stats.docsIndexed.Load()) + assert.NoError(t, bulkIndexer.Close(context.Background())) + assert.Equal(t, 1, observed.FilterMessage("bulk indexer flush error").Len()) + }) + } +} diff --git a/exporter/elasticsearchexporter/go.mod b/exporter/elasticsearchexporter/go.mod index 99c0b32edd79..35ff23c29cd6 100644 --- a/exporter/elasticsearchexporter/go.mod +++ b/exporter/elasticsearchexporter/go.mod @@ -4,6 +4,7 @@ go 1.21.0 require ( github.com/cenkalti/backoff/v4 v4.3.0 + github.com/elastic/go-docappender/v2 v2.1.0 github.com/elastic/go-elasticsearch/v7 v7.17.10 github.com/elastic/go-structform v0.0.10 github.com/lestrrat-go/strftime v1.0.6 @@ -24,16 +25,23 @@ require ( ) require ( + github.com/armon/go-radix v1.0.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect + github.com/elastic/elastic-transport-go/v8 v8.5.0 // indirect + github.com/elastic/go-elasticsearch/v8 v8.13.1 // indirect + github.com/elastic/go-sysinfo v1.7.1 // indirect + github.com/elastic/go-windows v1.0.1 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/google/uuid v1.6.0 // indirect + github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.8 // indirect github.com/knadh/koanf/maps v0.1.1 // indirect github.com/knadh/koanf/providers/confmap v0.1.0 // indirect github.com/knadh/koanf/v2 v2.1.1 // indirect @@ -46,7 +54,10 @@ require ( 
github.com/prometheus/client_golang v1.19.0 // indirect github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.53.0 // indirect - github.com/prometheus/procfs v0.12.0 // indirect + github.com/prometheus/procfs v0.13.0 // indirect + go.elastic.co/apm/module/apmzap/v2 v2.6.0 // indirect + go.elastic.co/apm/v2 v2.6.0 // indirect + go.elastic.co/fastjson v1.3.0 // indirect go.opentelemetry.io/collector v0.100.0 // indirect go.opentelemetry.io/collector/config/configretry v0.100.0 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.100.0 // indirect @@ -59,12 +70,14 @@ require ( go.opentelemetry.io/otel/sdk/metric v1.26.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/net v0.24.0 // indirect + golang.org/x/sync v0.7.0 // indirect golang.org/x/sys v0.19.0 // indirect golang.org/x/text v0.15.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect google.golang.org/grpc v1.63.2 // indirect google.golang.org/protobuf v1.34.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + howett.net/plist v1.0.0 // indirect ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/common => ../../internal/common diff --git a/exporter/elasticsearchexporter/go.sum b/exporter/elasticsearchexporter/go.sum index f155f47959a8..10311a4e7c10 100644 --- a/exporter/elasticsearchexporter/go.sum +++ b/exporter/elasticsearchexporter/go.sum @@ -1,3 +1,5 @@ +github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= @@ -7,10 +9,21 @@ github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/elastic/elastic-transport-go/v8 v8.5.0 h1:v5membAl7lvQgBTexPRDBO/RdnlQX+FM9fUVDyXxvH0= +github.com/elastic/elastic-transport-go/v8 v8.5.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= +github.com/elastic/go-docappender/v2 v2.1.0 h1:Ct/C2J9qgKue8kQumUDZAi/AB2F+wlrIVOf2TH4afPA= +github.com/elastic/go-docappender/v2 v2.1.0/go.mod h1:oHi6MsHriWaG8W6T9iyJ/PkEo2+182HIzq+0RRAzzgA= github.com/elastic/go-elasticsearch/v7 v7.17.10 h1:TCQ8i4PmIJuBunvBS6bwT2ybzVFxxUhhltAs3Gyu1yo= github.com/elastic/go-elasticsearch/v7 v7.17.10/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4= +github.com/elastic/go-elasticsearch/v8 v8.13.1 h1:du5F8IzUUyCkzxyHdrO9AtopcG95I/qwi2WK8Kf1xlg= +github.com/elastic/go-elasticsearch/v8 v8.13.1/go.mod h1:DIn7HopJs4oZC/w0WoJR13uMUxtHeq92eI5bqv5CRfI= github.com/elastic/go-structform v0.0.10 h1:oy08o/Ih2hHTkNcRY/1HhaYvIp5z6t8si8gnCJPDo1w= github.com/elastic/go-structform v0.0.10/go.mod h1:CZWf9aIRYY5SuKSmOhtXScE5uQiLZNqAFnwKR4OrIM4= +github.com/elastic/go-sysinfo v1.7.1 h1:Wx4DSARcKLllpKT2TnFVdSUJOsybqMYCNQZq1/wO+s0= +github.com/elastic/go-sysinfo v1.7.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= +github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= +github.com/elastic/go-windows v1.0.1 
h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0= +github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -27,18 +40,26 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= +github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= +github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU= github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU= github.com/knadh/koanf/v2 v2.1.1 h1:/R8eXqasSTsmDCsAyYj+81Wteg8AqrV9CP6gvsTsOmM= github.com/knadh/koanf/v2 v2.1.1/go.mod h1:4mnTRbZCK+ALuBXHZMjDfG9y714L7TykVnZkXbMU3Es= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lestrrat-go/envload v0.0.0-20180220234015-a3eb8ddeffcc h1:RKf14vYWi2ttpEmkA4aQ3j4u9dStX2t4M8UM6qqNsG8= @@ -54,6 +75,7 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 
h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -64,8 +86,9 @@ github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE= github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.13.0 h1:GqzLlQyfsPbaEHaQkO7tbDlriv/4o5Hudv6OXHGKX7o= +github.com/prometheus/procfs v0.13.0/go.mod h1:cd4PFCR54QLnGKPaKGA6l+cfuNXtht43ZKY6tow0Y1g= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -75,6 +98,16 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.elastic.co/apm/module/apmelasticsearch/v2 v2.6.0 h1:ukMcwyMaDXsS1dRK2qRYXT2AsfwaUy74TOOYCqkWJow= +go.elastic.co/apm/module/apmelasticsearch/v2 v2.6.0/go.mod h1:YpfiTTrqX5LB/CKBwX89oDCBAxuLJTFv40gcfxJyehM= +go.elastic.co/apm/module/apmhttp/v2 v2.6.0 h1:s8UeNFQmVBCNd4eoz7KDD9rEFhQC0HeUFXz3z9gpAmQ= +go.elastic.co/apm/module/apmhttp/v2 v2.6.0/go.mod h1:D0GLppLuI0Ddwvtl595GUxRgn6Z8L5KaDFVMv2H3GK0= +go.elastic.co/apm/module/apmzap/v2 v2.6.0 h1:R/iVORzGu3F9uM43iEVHD0nwiRo59O0bIXdayKsgayQ= +go.elastic.co/apm/module/apmzap/v2 v2.6.0/go.mod h1:B3i/8xRkqLgi6zNuV+Bp7Pt4cutaOObvrVSa7wUTAPw= +go.elastic.co/apm/v2 v2.6.0 h1:VieBMLQFtXua2YxpYxaSdYGnmmxhLT46gosI5yErJgY= +go.elastic.co/apm/v2 v2.6.0/go.mod h1:33rOXgtHwbgZcDgi6I/GtCSMZQqgxkHC0IQT3gudKvo= +go.elastic.co/fastjson v1.3.0 h1:hJO3OsYIhiqiT4Fgu0ZxAECnKASbwgiS+LMW5oCopKs= +go.elastic.co/fastjson v1.3.0/go.mod h1:K9vDh7O0ODsVKV2B5e2XYLY277QZaCbB3tS1SnARvko= go.opentelemetry.io/collector v0.100.0 h1:Q6IAGjMzjkZ7WepuwyCa6UytDPP0O88GemonQOUjP2s= go.opentelemetry.io/collector v0.100.0/go.mod h1:QlVjQWlrPtBwVRm8tr+3P4FzNZSlYEfuUSaWoAwK+ko= go.opentelemetry.io/collector/component v0.100.0 h1:3Y6dl3uDkDzilaikYrPxbZDOlzrDijrF1cIPzfyTwWA= @@ -132,11 +165,16 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.7.0 
h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= @@ -159,8 +197,14 @@ google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDom google.golang.org/protobuf v1.34.0 h1:Qo/qEd2RZPCf2nKuorzksSknv0d3ERwp1vFG38gSmH4= google.golang.org/protobuf v1.34.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= +howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= +howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= diff --git a/exporter/elasticsearchexporter/integrationtest/go.mod b/exporter/elasticsearchexporter/integrationtest/go.mod index ba25f218a78e..230b4d99e08b 100644 --- a/exporter/elasticsearchexporter/integrationtest/go.mod +++ b/exporter/elasticsearchexporter/integrationtest/go.mod @@ -39,7 +39,7 @@ require ( github.com/elastic/go-elasticsearch/v7 v7.17.10 // indirect github.com/elastic/go-elasticsearch/v8 v8.13.1 // indirect github.com/elastic/go-structform v0.0.10 // indirect - github.com/elastic/go-sysinfo v1.13.1 // indirect + github.com/elastic/go-sysinfo v1.14.0 // indirect github.com/elastic/go-windows v1.0.1 // indirect github.com/expr-lang/expr v1.16.5 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -59,7 +59,6 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/influxdata/go-syslog/v3 v3.0.1-0.20230911200830-875f5bc594a4 // indirect github.com/jaegertracing/jaeger v1.57.0 // indirect - github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.17.8 // indirect @@ -109,6 +108,7 @@ require ( github.com/yusufpapurcu/wmi v1.2.4 // indirect go.elastic.co/apm/module/apmelasticsearch/v2 v2.6.0 // indirect 
go.elastic.co/apm/module/apmhttp/v2 v2.6.0 // indirect + go.elastic.co/apm/module/apmzap/v2 v2.6.0 // indirect go.elastic.co/apm/v2 v2.6.0 // indirect go.elastic.co/fastjson v1.3.0 // indirect go.etcd.io/bbolt v1.3.10 // indirect diff --git a/exporter/elasticsearchexporter/integrationtest/go.sum b/exporter/elasticsearchexporter/integrationtest/go.sum index 3f78ca7da128..163a3b6c180f 100644 --- a/exporter/elasticsearchexporter/integrationtest/go.sum +++ b/exporter/elasticsearchexporter/integrationtest/go.sum @@ -2,8 +2,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/apache/thrift v0.20.0 h1:631+KvYbsBZxmuJjYwhezVsrfc/TbqtZV4QcxOX1fOI= @@ -26,14 +24,6 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= -github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= -github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= -github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= -github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/elastic/elastic-transport-go/v8 v8.5.0 h1:v5membAl7lvQgBTexPRDBO/RdnlQX+FM9fUVDyXxvH0= github.com/elastic/elastic-transport-go/v8 v8.5.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= github.com/elastic/go-docappender/v2 v2.1.0 h1:Ct/C2J9qgKue8kQumUDZAi/AB2F+wlrIVOf2TH4afPA= @@ -44,8 +34,8 @@ github.com/elastic/go-elasticsearch/v8 v8.13.1 h1:du5F8IzUUyCkzxyHdrO9AtopcG95I/ github.com/elastic/go-elasticsearch/v8 v8.13.1/go.mod h1:DIn7HopJs4oZC/w0WoJR13uMUxtHeq92eI5bqv5CRfI= github.com/elastic/go-structform v0.0.10 h1:oy08o/Ih2hHTkNcRY/1HhaYvIp5z6t8si8gnCJPDo1w= github.com/elastic/go-structform v0.0.10/go.mod h1:CZWf9aIRYY5SuKSmOhtXScE5uQiLZNqAFnwKR4OrIM4= -github.com/elastic/go-sysinfo v1.13.1 h1:U5Jlx6c/rLkR72O8wXXXo1abnGlWGJU/wbzNJ2AfQa4= -github.com/elastic/go-sysinfo v1.13.1/go.mod h1:GKqR8bbMK/1ITnez9NIsIfXQr25aLhRJa7AfT8HpBFQ= +github.com/elastic/go-sysinfo v1.14.0 h1:dQRtiqLycoOOla7IflZg3aN213vqJmP0lpVpKQ9lUEY= +github.com/elastic/go-sysinfo v1.14.0/go.mod h1:FKUXnZWhnYI0ueO7jhsGV3uQJ5hiz8OqM5b3oGyaRr8= 
github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0= github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -125,8 +115,6 @@ github.com/influxdata/go-syslog/v3 v3.0.1-0.20230911200830-875f5bc594a4/go.mod h github.com/jaegertracing/jaeger v1.57.0 h1:3wDtUUPs6NRYH7+d+y8MilDkLHdpPrVlQ2wbcsA62bs= github.com/jaegertracing/jaeger v1.57.0/go.mod h1:p/1fxIU9hKHl7qEhKC72p2ZYVhvvZvNB73y6V7YyuTs= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= -github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -169,10 +157,6 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mostynb/go-grpc-compression v1.2.2 h1:XaDbnRvt2+1vgr0b/l0qh4mJAfIxE0bKXtz2Znl3GGI= github.com/mostynb/go-grpc-compression v1.2.2/go.mod h1:GOCr2KBxXcblCuczg3YdLQlcin1/NfyDA348ckuCH6w= -github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= -github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= -github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= github.com/openzipkin/zipkin-go v0.4.3 h1:9EGwpqkgnwdEIJ+Od7QVSEIH+ocmm5nPat0G7sjsSdg= github.com/openzipkin/zipkin-go v0.4.3/go.mod h1:M9wCJZFWCo2RiY+o1eBCEMe0Dp2S5LDHcMZmk3RmK7c= github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= @@ -258,6 +242,8 @@ go.elastic.co/apm/module/apmelasticsearch/v2 v2.6.0 h1:ukMcwyMaDXsS1dRK2qRYXT2As go.elastic.co/apm/module/apmelasticsearch/v2 v2.6.0/go.mod h1:YpfiTTrqX5LB/CKBwX89oDCBAxuLJTFv40gcfxJyehM= go.elastic.co/apm/module/apmhttp/v2 v2.6.0 h1:s8UeNFQmVBCNd4eoz7KDD9rEFhQC0HeUFXz3z9gpAmQ= go.elastic.co/apm/module/apmhttp/v2 v2.6.0/go.mod h1:D0GLppLuI0Ddwvtl595GUxRgn6Z8L5KaDFVMv2H3GK0= +go.elastic.co/apm/module/apmzap/v2 v2.6.0 h1:R/iVORzGu3F9uM43iEVHD0nwiRo59O0bIXdayKsgayQ= +go.elastic.co/apm/module/apmzap/v2 v2.6.0/go.mod h1:B3i/8xRkqLgi6zNuV+Bp7Pt4cutaOObvrVSa7wUTAPw= go.elastic.co/apm/v2 v2.6.0 h1:VieBMLQFtXua2YxpYxaSdYGnmmxhLT46gosI5yErJgY= go.elastic.co/apm/v2 v2.6.0/go.mod h1:33rOXgtHwbgZcDgi6I/GtCSMZQqgxkHC0IQT3gudKvo= go.elastic.co/fastjson v1.3.0 h1:hJO3OsYIhiqiT4Fgu0ZxAECnKASbwgiS+LMW5oCopKs= @@ -405,8 +391,6 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net 
v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -464,8 +448,6 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw= -golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/exporter/elasticsearchexporter/logs_exporter.go b/exporter/elasticsearchexporter/logs_exporter.go index 372ff2934cb6..f7ab2ea8a58f 100644 --- a/exporter/elasticsearchexporter/logs_exporter.go +++ b/exporter/elasticsearchexporter/logs_exporter.go @@ -22,16 +22,12 @@ type elasticsearchLogsExporter struct { index string logstashFormat LogstashFormatSettings dynamicIndex bool - maxAttempts int - retryOnStatus []int client *esClientCurrent - bulkIndexer esBulkIndexerCurrent + bulkIndexer *esBulkIndexerCurrent model mappingModel } -const createAction = "create" - func newLogsExporter(logger *zap.Logger, cfg *Config) (*elasticsearchLogsExporter, error) { if err := cfg.Validate(); err != nil { return nil, err @@ -47,11 +43,6 @@ func newLogsExporter(logger *zap.Logger, cfg *Config) (*elasticsearchLogsExporte return nil, err } - maxAttempts := 1 - if cfg.Retry.Enabled { - maxAttempts = cfg.Retry.MaxRequests - } - model := &encodeModel{ dedup: cfg.Mapping.Dedup, dedot: cfg.Mapping.Dedot, @@ -69,8 +60,6 @@ func newLogsExporter(logger *zap.Logger, cfg *Config) (*elasticsearchLogsExporte index: indexStr, dynamicIndex: cfg.LogsDynamicIndex.Enabled, - maxAttempts: maxAttempts, - retryOnStatus: cfg.Retry.RetryOnStatus, model: model, logstashFormat: cfg.LogstashFormat, } @@ -129,5 +118,5 @@ func (e *elasticsearchLogsExporter) pushLogRecord(ctx context.Context, resource if err != nil { return fmt.Errorf("Failed to encode log event: %w", err) } - return pushDocuments(ctx, e.logger, fIndex, document, e.bulkIndexer, e.maxAttempts, e.retryOnStatus) + return pushDocuments(ctx, fIndex, document, e.bulkIndexer) } diff --git a/exporter/elasticsearchexporter/logs_exporter_test.go b/exporter/elasticsearchexporter/logs_exporter_test.go index 60bc7d6ba719..28ce2fb0f624 100644 --- a/exporter/elasticsearchexporter/logs_exporter_test.go +++ b/exporter/elasticsearchexporter/logs_exporter_test.go @@ -516,7 +516,7 @@ func withTestExporterConfig(fns ...func(*Config)) func(string) *Config { } func mustSend(t *testing.T, exporter *elasticsearchLogsExporter, contents string) { - err := pushDocuments(context.TODO(), zap.L(), exporter.index, []byte(contents), exporter.bulkIndexer, exporter.maxAttempts, exporter.retryOnStatus) + err := pushDocuments(context.TODO(), exporter.index, []byte(contents), exporter.bulkIndexer) require.NoError(t, err) } 
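
The new `bulkIndexerPool` introduced above replaces the old per-document retry loop with channel-fed workers that are drained on shutdown. Below is a minimal, self-contained Go sketch (not part of this patch) of that close-and-drain pattern: close the work channel, wait for the workers in a goroutine, and race the wait against the caller's context, as `bulkIndexerPool.Close` does.

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

func main() {
	items := make(chan string)
	var wg sync.WaitGroup

	// A single worker draining the channel, standing in for the pool's workers.
	wg.Add(1)
	go func() {
		defer wg.Done()
		for item := range items {
			fmt.Println("flushing", item) // stand-in for indexer.Add + Flush
		}
	}()

	items <- "doc-1"

	// Shutdown: stop accepting work, then wait for the drain or the context deadline.
	close(items)
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	select {
	case <-ctx.Done():
		fmt.Println("close timed out:", ctx.Err())
	case <-done:
		fmt.Println("all workers drained")
	}
}
```

This keeps shutdown bounded: callers get either a fully drained indexer or a context error, never an indefinite hang.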
diff --git a/exporter/elasticsearchexporter/trace_exporter.go b/exporter/elasticsearchexporter/trace_exporter.go index 7153132b4975..073bed4d8b6a 100644 --- a/exporter/elasticsearchexporter/trace_exporter.go +++ b/exporter/elasticsearchexporter/trace_exporter.go @@ -22,11 +22,9 @@ type elasticsearchTracesExporter struct { index string logstashFormat LogstashFormatSettings dynamicIndex bool - maxAttempts int - retryOnStatus []int client *esClientCurrent - bulkIndexer esBulkIndexerCurrent + bulkIndexer *esBulkIndexerCurrent model mappingModel } @@ -45,11 +43,6 @@ func newTracesExporter(logger *zap.Logger, cfg *Config) (*elasticsearchTracesExp return nil, err } - maxAttempts := 1 - if cfg.Retry.Enabled { - maxAttempts = cfg.Retry.MaxRequests - } - model := &encodeModel{ dedup: cfg.Mapping.Dedup, dedot: cfg.Mapping.Dedot, @@ -63,8 +56,6 @@ func newTracesExporter(logger *zap.Logger, cfg *Config) (*elasticsearchTracesExp index: cfg.TracesIndex, dynamicIndex: cfg.TracesDynamicIndex.Enabled, - maxAttempts: maxAttempts, - retryOnStatus: cfg.Retry.RetryOnStatus, model: model, logstashFormat: cfg.LogstashFormat, }, nil @@ -124,5 +115,5 @@ func (e *elasticsearchTracesExporter) pushTraceRecord(ctx context.Context, resou if err != nil { return fmt.Errorf("Failed to encode trace record: %w", err) } - return pushDocuments(ctx, e.logger, fIndex, document, e.bulkIndexer, e.maxAttempts, e.retryOnStatus) + return pushDocuments(ctx, fIndex, document, e.bulkIndexer) } diff --git a/exporter/elasticsearchexporter/traces_exporter_test.go b/exporter/elasticsearchexporter/traces_exporter_test.go index 57dd1cc41574..c5490398a56c 100644 --- a/exporter/elasticsearchexporter/traces_exporter_test.go +++ b/exporter/elasticsearchexporter/traces_exporter_test.go @@ -463,7 +463,7 @@ func withTestTracesExporterConfig(fns ...func(*Config)) func(string) *Config { } func mustSendTraces(t *testing.T, exporter *elasticsearchTracesExporter, contents string) { - err := pushDocuments(context.TODO(), zap.L(), exporter.index, []byte(contents), exporter.bulkIndexer, exporter.maxAttempts, exporter.retryOnStatus) + err := pushDocuments(context.TODO(), exporter.index, []byte(contents), exporter.bulkIndexer) require.NoError(t, err) } diff --git a/go.mod b/go.mod index f72a92b4927c..318de912780a 100644 --- a/go.mod +++ b/go.mod @@ -323,6 +323,7 @@ require ( github.com/apache/thrift v0.20.0 // indirect github.com/ardielle/ardielle-go v1.5.2 // indirect github.com/armon/go-metrics v0.4.1 // indirect + github.com/armon/go-radix v1.0.0 // indirect github.com/aws/aws-sdk-go v1.52.4 // indirect github.com/aws/aws-sdk-go-v2 v1.26.1 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 // indirect @@ -382,8 +383,13 @@ require ( github.com/eapache/go-resiliency v1.6.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect github.com/eapache/queue v1.1.0 // indirect + github.com/elastic/elastic-transport-go/v8 v8.5.0 // indirect + github.com/elastic/go-docappender/v2 v2.1.0 // indirect github.com/elastic/go-elasticsearch/v7 v7.17.10 // indirect + github.com/elastic/go-elasticsearch/v8 v8.13.1 // indirect github.com/elastic/go-structform v0.0.10 // indirect + github.com/elastic/go-sysinfo v1.7.1 // indirect + github.com/elastic/go-windows v1.0.1 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/envoyproxy/go-control-plane v0.12.0 // indirect github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect @@ -489,6 +495,7 @@ require ( github.com/jcmturner/gokrb5/v8 v8.4.4 
// indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect @@ -671,6 +678,9 @@ require ( github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect github.com/zeebo/xxh3 v1.0.2 // indirect + go.elastic.co/apm/module/apmzap/v2 v2.6.0 // indirect + go.elastic.co/apm/v2 v2.6.0 // indirect + go.elastic.co/fastjson v1.3.0 // indirect go.etcd.io/bbolt v1.3.10 // indirect go.mongodb.org/atlas v0.36.0 // indirect go.mongodb.org/mongo-driver v1.15.0 // indirect @@ -752,6 +762,7 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect gopkg.in/zorkian/go-datadog-api.v2 v2.30.0 // indirect gotest.tools/v3 v3.5.0 // indirect + howett.net/plist v1.0.0 // indirect k8s.io/api v0.29.3 // indirect k8s.io/apimachinery v0.29.3 // indirect k8s.io/client-go v0.29.3 // indirect diff --git a/go.sum b/go.sum index c582ea752522..f2b8e860463e 100644 --- a/go.sum +++ b/go.sum @@ -997,6 +997,7 @@ github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= @@ -1249,10 +1250,21 @@ github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1 github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/elastic/elastic-transport-go/v8 v8.5.0 h1:v5membAl7lvQgBTexPRDBO/RdnlQX+FM9fUVDyXxvH0= +github.com/elastic/elastic-transport-go/v8 v8.5.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= +github.com/elastic/go-docappender/v2 v2.1.0 h1:Ct/C2J9qgKue8kQumUDZAi/AB2F+wlrIVOf2TH4afPA= +github.com/elastic/go-docappender/v2 v2.1.0/go.mod h1:oHi6MsHriWaG8W6T9iyJ/PkEo2+182HIzq+0RRAzzgA= github.com/elastic/go-elasticsearch/v7 v7.17.10 h1:TCQ8i4PmIJuBunvBS6bwT2ybzVFxxUhhltAs3Gyu1yo= github.com/elastic/go-elasticsearch/v7 v7.17.10/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4= +github.com/elastic/go-elasticsearch/v8 v8.13.1 h1:du5F8IzUUyCkzxyHdrO9AtopcG95I/qwi2WK8Kf1xlg= +github.com/elastic/go-elasticsearch/v8 v8.13.1/go.mod h1:DIn7HopJs4oZC/w0WoJR13uMUxtHeq92eI5bqv5CRfI= github.com/elastic/go-structform v0.0.10 h1:oy08o/Ih2hHTkNcRY/1HhaYvIp5z6t8si8gnCJPDo1w= github.com/elastic/go-structform v0.0.10/go.mod h1:CZWf9aIRYY5SuKSmOhtXScE5uQiLZNqAFnwKR4OrIM4= +github.com/elastic/go-sysinfo v1.7.1 h1:Wx4DSARcKLllpKT2TnFVdSUJOsybqMYCNQZq1/wO+s0= +github.com/elastic/go-sysinfo v1.7.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= +github.com/elastic/go-windows v1.0.0/go.mod 
h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= +github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0= +github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -1751,6 +1763,7 @@ github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh6 github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -1759,6 +1772,8 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGw github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= +github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= +github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= @@ -2133,6 +2148,7 @@ github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdD github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= @@ -2375,6 +2391,16 @@ github.com/zorkian/go-datadog-api v2.30.0+incompatible h1:R4ryGocppDqZZbnNc5EDR8 github.com/zorkian/go-datadog-api v2.30.0+incompatible/go.mod h1:PkXwHX9CUQa/FpB9ZwAD45N1uhCW4MT/Wj7m36PbKss= go.einride.tech/aip v0.67.1 h1:d/4TW92OxXBngkSOwWS2CH5rez869KpKMaN44mdxkFI= go.einride.tech/aip v0.67.1/go.mod h1:ZGX4/zKw8dcgzdLsrvpOOGxfxI2QSk12SlP7d6c0/XI= +go.elastic.co/apm/module/apmelasticsearch/v2 v2.6.0 h1:ukMcwyMaDXsS1dRK2qRYXT2AsfwaUy74TOOYCqkWJow= 
+go.elastic.co/apm/module/apmelasticsearch/v2 v2.6.0/go.mod h1:YpfiTTrqX5LB/CKBwX89oDCBAxuLJTFv40gcfxJyehM= +go.elastic.co/apm/module/apmhttp/v2 v2.6.0 h1:s8UeNFQmVBCNd4eoz7KDD9rEFhQC0HeUFXz3z9gpAmQ= +go.elastic.co/apm/module/apmhttp/v2 v2.6.0/go.mod h1:D0GLppLuI0Ddwvtl595GUxRgn6Z8L5KaDFVMv2H3GK0= +go.elastic.co/apm/module/apmzap/v2 v2.6.0 h1:R/iVORzGu3F9uM43iEVHD0nwiRo59O0bIXdayKsgayQ= +go.elastic.co/apm/module/apmzap/v2 v2.6.0/go.mod h1:B3i/8xRkqLgi6zNuV+Bp7Pt4cutaOObvrVSa7wUTAPw= +go.elastic.co/apm/v2 v2.6.0 h1:VieBMLQFtXua2YxpYxaSdYGnmmxhLT46gosI5yErJgY= +go.elastic.co/apm/v2 v2.6.0/go.mod h1:33rOXgtHwbgZcDgi6I/GtCSMZQqgxkHC0IQT3gudKvo= +go.elastic.co/fastjson v1.3.0 h1:hJO3OsYIhiqiT4Fgu0ZxAECnKASbwgiS+LMW5oCopKs= +go.elastic.co/fastjson v1.3.0/go.mod h1:K9vDh7O0ODsVKV2B5e2XYLY277QZaCbB3tS1SnARvko= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0= @@ -2801,6 +2827,7 @@ golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2808,6 +2835,7 @@ golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -3361,6 +3389,7 @@ gopkg.in/square/go-jose.v2 v2.4.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -3391,6 +3420,9 @@ 
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= +howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= +howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s= k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= From e57185cb2c48e6e11ab8db73e63f4b36adcc2134 Mon Sep 17 00:00:00 2001 From: Raj Nishtala <113392743+rnishtala-sumo@users.noreply.github.com> Date: Thu, 9 May 2024 11:36:03 -0400 Subject: [PATCH 13/55] fix(test): Skip flaky test around forcing collector re-registration until the root cause is confirmed (#32937) **Description:** Remove flaky test around forcing collector re-registration until the root cause is confirmed **Link to tracking Issue:** https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/32785 **Testing:** Unit tests --- extension/sumologicextension/extension_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/extension/sumologicextension/extension_test.go b/extension/sumologicextension/extension_test.go index f8479192f8b9..2e22649d5f7d 100644 --- a/extension/sumologicextension/extension_test.go +++ b/extension/sumologicextension/extension_test.go @@ -562,6 +562,7 @@ func TestRegisterEmptyCollectorName(t *testing.T) { } func TestRegisterEmptyCollectorNameForceRegistration(t *testing.T) { + t.SkipNow() // Skip this test for now as it is flaky t.Parallel() hostname, err := getHostname(zap.NewNop()) From 7fd145b5fceca5faadd35f5bcfca2d043fbd6e52 Mon Sep 17 00:00:00 2001 From: shalper2 <99686388+shalper2@users.noreply.github.com> Date: Thu, 9 May 2024 11:37:07 -0500 Subject: [PATCH 14/55] [chore][receiver/splunkenterprise] Splunkent wire component (#32795) **Description:** Graduate splunkenterprise receiver component to alpha **Link to tracking Issue:** **Testing:** Performed `make otelcontribcol` and ran resulting binary with the following config: ```yaml extensions: basicauth/indexer: client_auth: username: admin password: securityFirst basicauth/cluster_master: client_auth: username: admin password: securityFirst receivers: splunkenterprise: indexer: auth: authenticator: basicauth/indexer endpoint: "https://localhost:8089/" timeout: 45s cluster_master: auth: authenticator: basicauth/cluster_master endpoint: "https://localhost:8089/" timeout: 45s exporters: otlp: endpoint: 127.0.0.1:8000 service: extensions: [basicauth/indexer, basicauth/cluster_master] pipelines: metrics: receivers: [splunkenterprise] exporters: [otlp] ``` and received the following output: ``` sh ~> ./otelcontribcol_linux_amd64 --config=file:config.yaml 2024-05-08T17:34:33.032-0500 info service@v0.100.0/service.go:102 Setting up own telemetry... 2024-05-08T17:34:33.032-0500 info service@v0.100.0/telemetry.go:103 Serving metrics {"address": ":8888", "level": "Normal"} 2024-05-08T17:34:33.032-0500 info receiver@v0.100.0/receiver.go:310 Development component. May change in the future. 
{"kind": "receiver", "name": "splunkenterprise", "data_type": "metrics"} 2024-05-08T17:34:33.033-0500 info service@v0.100.0/service.go:169 Starting otelcontribcol... {"Version": "0.100.0-dev", "NumCPU": 16} 2024-05-08T17:34:33.033-0500 info extensions/extensions.go:34 Starting extensions... 2024-05-08T17:34:33.033-0500 info extensions/extensions.go:37 Extension is starting... {"kind": "extension", "name": "basicauth/cluster_master"} 2024-05-08T17:34:33.033-0500 info extensions/extensions.go:52 Extension started. {"kind": "extension", "name": "basicauth/cluster_master"} 2024-05-08T17:34:33.033-0500 info extensions/extensions.go:37 Extension is starting... {"kind": "extension", "name": "basicauth/indexer"} 2024-05-08T17:34:33.033-0500 info extensions/extensions.go:52 Extension started. {"kind": "extension", "name": "basicauth/indexer"} 2024-05-08T17:34:33.033-0500 info service@v0.100.0/service.go:195 Everything is ready. Begin running and processing data. ``` indicating that the collector was able to successfully start with the component configured. **Documentation:** Documentation was updated to indicate change in status from development to alpha --------- Co-authored-by: Curtis Robert --- cmd/configschema/go.mod | 3 +++ cmd/otelcontribcol/builder-config.yaml | 2 ++ cmd/otelcontribcol/components.go | 2 ++ cmd/otelcontribcol/go.mod | 3 +++ cmd/otelcontribcol/receivers_test.go | 3 +++ go.mod | 3 +++ internal/components/components.go | 2 ++ receiver/splunkenterprisereceiver/README.md | 4 ++-- .../internal/metadata/generated_status.go | 2 +- receiver/splunkenterprisereceiver/metadata.yaml | 2 +- 10 files changed, 22 insertions(+), 4 deletions(-) diff --git a/cmd/configschema/go.mod b/cmd/configschema/go.mod index 4c4269eae03c..28f840601974 100644 --- a/cmd/configschema/go.mod +++ b/cmd/configschema/go.mod @@ -587,6 +587,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/receiver/simpleprometheusreceiver v0.100.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/receiver/snmpreceiver v0.100.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/receiver/snowflakereceiver v0.100.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver v0.100.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sqlqueryreceiver v0.100.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver v0.100.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/receiver/wavefrontreceiver v0.100.0 // indirect @@ -1224,3 +1225,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/extension/acke replace github.com/open-telemetry/opentelemetry-collector-contrib/connector/grafanacloudconnector => ../../connector/grafanacloudconnector replace github.com/open-telemetry/opentelemetry-collector-contrib/extension/sumologicextension => ../../extension/sumologicextension + +replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver => ../../receiver/splunkenterprisereceiver diff --git a/cmd/otelcontribcol/builder-config.yaml b/cmd/otelcontribcol/builder-config.yaml index b8d5687a41ea..62dd27ec7191 100644 --- a/cmd/otelcontribcol/builder-config.yaml +++ b/cmd/otelcontribcol/builder-config.yaml @@ -190,6 +190,7 @@ receivers: - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/skywalkingreceiver v0.100.0 - gomod: 
github.com/open-telemetry/opentelemetry-collector-contrib/receiver/snowflakereceiver v0.100.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/solacereceiver v0.100.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver v0.100.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver v0.100.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sqlqueryreceiver v0.100.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sqlserverreceiver v0.100.0 @@ -458,3 +459,4 @@ replaces: - github.com/open-telemetry/opentelemetry-collector-contrib/internal/sqlquery => ../../internal/sqlquery - github.com/open-telemetry/opentelemetry-collector-contrib/extension/ackextension => ../../extension/ackextension - github.com/open-telemetry/opentelemetry-collector-contrib/extension/googleclientauthextension => ../../extension/googleclientauthextension + - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver => ../../receiver/splunkenterprisereceiver diff --git a/cmd/otelcontribcol/components.go b/cmd/otelcontribcol/components.go index f96684a55051..2ca7c997d6d9 100644 --- a/cmd/otelcontribcol/components.go +++ b/cmd/otelcontribcol/components.go @@ -195,6 +195,7 @@ import ( snmpreceiver "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/snmpreceiver" snowflakereceiver "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/snowflakereceiver" solacereceiver "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/solacereceiver" + splunkenterprisereceiver "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver" splunkhecreceiver "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver" sqlqueryreceiver "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sqlqueryreceiver" sqlserverreceiver "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sqlserverreceiver" @@ -328,6 +329,7 @@ func components() (otelcol.Factories, error) { skywalkingreceiver.NewFactory(), snowflakereceiver.NewFactory(), solacereceiver.NewFactory(), + splunkenterprisereceiver.NewFactory(), splunkhecreceiver.NewFactory(), sqlqueryreceiver.NewFactory(), sqlserverreceiver.NewFactory(), diff --git a/cmd/otelcontribcol/go.mod b/cmd/otelcontribcol/go.mod index 95e57015be97..7aba937de96a 100644 --- a/cmd/otelcontribcol/go.mod +++ b/cmd/otelcontribcol/go.mod @@ -183,6 +183,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/receiver/snmpreceiver v0.100.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/snowflakereceiver v0.100.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/solacereceiver v0.100.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver v0.100.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver v0.100.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sqlqueryreceiver v0.100.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sqlserverreceiver v0.100.0 @@ -1281,3 +1282,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/sqlqu replace github.com/open-telemetry/opentelemetry-collector-contrib/extension/ackextension => ../../extension/ackextension replace 
github.com/open-telemetry/opentelemetry-collector-contrib/extension/googleclientauthextension => ../../extension/googleclientauthextension + +replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver => ../../receiver/splunkenterprisereceiver diff --git a/cmd/otelcontribcol/receivers_test.go b/cmd/otelcontribcol/receivers_test.go index c81c02503674..b2de7709530d 100644 --- a/cmd/otelcontribcol/receivers_test.go +++ b/cmd/otelcontribcol/receivers_test.go @@ -377,6 +377,9 @@ func TestDefaultReceivers(t *testing.T) { { receiver: "snowflake", }, + { + receiver: "splunkenterprise", + }, { receiver: "splunk_hec", }, diff --git a/go.mod b/go.mod index 318de912780a..e534890bbd9c 100644 --- a/go.mod +++ b/go.mod @@ -151,6 +151,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/receiver/snmpreceiver v0.100.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/snowflakereceiver v0.100.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/solacereceiver v0.100.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver v0.100.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver v0.100.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sqlqueryreceiver v0.100.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sqlserverreceiver v0.100.0 @@ -1224,3 +1225,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/extension/enco replace github.com/open-telemetry/opentelemetry-collector-contrib/extension/encoding/otlpencodingextension => ./extension/encoding/otlpencodingextension replace github.com/open-telemetry/opentelemetry-collector-contrib/extension/ackextension => ./extension/ackextension + +replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver => ./receiver/splunkenterprisereceiver diff --git a/internal/components/components.go b/internal/components/components.go index 46966ee5bf78..47809d68925e 100644 --- a/internal/components/components.go +++ b/internal/components/components.go @@ -169,6 +169,7 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/snmpreceiver" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/snowflakereceiver" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/solacereceiver" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sqlqueryreceiver" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sqlserverreceiver" @@ -286,6 +287,7 @@ func Components() (otelcol.Factories, error) { snmpreceiver.NewFactory(), snowflakereceiver.NewFactory(), solacereceiver.NewFactory(), + splunkenterprisereceiver.NewFactory(), splunkhecreceiver.NewFactory(), sqlqueryreceiver.NewFactory(), sqlserverreceiver.NewFactory(), diff --git a/receiver/splunkenterprisereceiver/README.md b/receiver/splunkenterprisereceiver/README.md index 91a0a712f292..9f7a69711d1a 100644 --- a/receiver/splunkenterprisereceiver/README.md +++ b/receiver/splunkenterprisereceiver/README.md @@ -3,12 +3,12 @@ | Status | | | ------------- |-----------| -| Stability | [development]: metrics | +| Stability | [alpha]: metrics | | Distributions | [] | | Issues | [![Open 
issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Areceiver%2Fsplunkenterprise%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Areceiver%2Fsplunkenterprise) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Areceiver%2Fsplunkenterprise%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Areceiver%2Fsplunkenterprise) | | [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@shalper2](https://www.github.com/shalper2), [@MovieStoreGuy](https://www.github.com/MovieStoreGuy), [@greatestusername](https://www.github.com/greatestusername) | -[development]: https://github.com/open-telemetry/opentelemetry-collector#development +[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha The Splunk Enterprise Receiver is a pull based tool which enables the ingestion of performance metrics describing the operational status of a user's Splunk Enterprise deployment to an appropriate observability tool. diff --git a/receiver/splunkenterprisereceiver/internal/metadata/generated_status.go b/receiver/splunkenterprisereceiver/internal/metadata/generated_status.go index 1ba8354ac2fb..63cfdd5247ff 100644 --- a/receiver/splunkenterprisereceiver/internal/metadata/generated_status.go +++ b/receiver/splunkenterprisereceiver/internal/metadata/generated_status.go @@ -11,5 +11,5 @@ var ( ) const ( - MetricsStability = component.StabilityLevelDevelopment + MetricsStability = component.StabilityLevelAlpha ) diff --git a/receiver/splunkenterprisereceiver/metadata.yaml b/receiver/splunkenterprisereceiver/metadata.yaml index d092ce95e31c..d9ec6a285521 100644 --- a/receiver/splunkenterprisereceiver/metadata.yaml +++ b/receiver/splunkenterprisereceiver/metadata.yaml @@ -4,7 +4,7 @@ scope_name: otelcol/splunkenterprisereceiver status: class: receiver stability: - development: [metrics] + alpha: [metrics] distributions: codeowners: active: [shalper2, MovieStoreGuy, greatestusername] From c0512b91f0ad83d2e7c56c193bc8503693ea569c Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Thu, 9 May 2024 21:06:40 +0200 Subject: [PATCH 15/55] [pkg/ottl] Added support for timezone in Time converter (#32479) **Description:** Added support for default timezone in Time converter. 
Timezone is optional and can be specified as so: `Time("2023-05-26 12:34:56", "%Y-%m-%d %H:%M:%S", "America/New_York")` **Link to tracking Issue:** #32140 **Testing:** Unit tests added **Documentation:** Documentation in ottl/Readme updated --------- Co-authored-by: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com> Co-authored-by: Evan Bradley <11745660+evan-bradley@users.noreply.github.com> --- .chloggen/ottl-time-timezone.yaml | 27 ++++++++++++ pkg/ottl/ottlfuncs/README.md | 15 ++++++- pkg/ottl/ottlfuncs/func_time.go | 17 +++++--- pkg/ottl/ottlfuncs/func_time_test.go | 64 ++++++++++++++++++++++++++-- 4 files changed, 113 insertions(+), 10 deletions(-) create mode 100644 .chloggen/ottl-time-timezone.yaml diff --git a/.chloggen/ottl-time-timezone.yaml b/.chloggen/ottl-time-timezone.yaml new file mode 100644 index 000000000000..96d171693828 --- /dev/null +++ b/.chloggen/ottl-time-timezone.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: pkg/ottl + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Added support for timezone in Time converter + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [32140] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/pkg/ottl/ottlfuncs/README.md b/pkg/ottl/ottlfuncs/README.md index 2c6a5b65bdc4..4abbb3ea9508 100644 --- a/pkg/ottl/ottlfuncs/README.md +++ b/pkg/ottl/ottlfuncs/README.md @@ -1131,11 +1131,11 @@ Examples: ### Time -`Time(target, format)` +`Time(target, format, Optional[location])` The `Time` Converter takes a string representation of a time and converts it to a Golang `time.Time`. -`target` is a string. `format` is a string. +`target` is a string. `format` is a string, `location` is an optional string. If either `target` or `format` are nil, an error is returned. The parser used is the parser at [internal/coreinternal/parser](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/internal/coreinternal/timeutils). If the `target` and `format` do not follow the parsing rules used by this parser, an error is returned. @@ -1176,6 +1176,16 @@ If either `target` or `format` are nil, an error is returned. The parser used is |`%%` | A % sign | | |`%c` | Date and time representation | Mon Jan 02 15:04:05 2006 | +`location` specifies a default time zone canonical ID to be used for date parsing in case it is not part of `format`. 
+ +When loading `location`, this function will look for the IANA Time Zone database in the following locations in order: +- a directory or uncompressed zip file named by the ZONEINFO environment variable +- on a Unix system, the system standard installation location +- $GOROOT/lib/time/zoneinfo.zip +- the `time/tzdata` package, if it was imported. + +When building a Collector binary, importing `time/tzdata` in any Go source file will bundle the database into the binary, which guarantees the lookups will work regardless of the setup on the host setup. Note this will add roughly 500kB to binary size. + Examples: - `Time("02/04/2023", "%m/%d/%Y")` @@ -1183,6 +1193,7 @@ Examples: - `Time("2023-05-26 12:34:56 HST", "%Y-%m-%d %H:%M:%S %Z")` - `Time("1986-10-01T00:17:33 MST", "%Y-%m-%dT%H:%M:%S %Z")` - `Time("2012-11-01T22:08:41+0000 EST", "%Y-%m-%dT%H:%M:%S%z %Z")` +- `Time("2023-05-26 12:34:56", "%Y-%m-%d %H:%M:%S", "America/New_York")` ### TraceID diff --git a/pkg/ottl/ottlfuncs/func_time.go b/pkg/ottl/ottlfuncs/func_time.go index a6ef708b0921..b6d793cc3e5d 100644 --- a/pkg/ottl/ottlfuncs/func_time.go +++ b/pkg/ottl/ottlfuncs/func_time.go @@ -12,8 +12,9 @@ import ( ) type TimeArguments[K any] struct { - Time ottl.StringGetter[K] - Format string + Time ottl.StringGetter[K] + Format string + Location ottl.Optional[string] } func NewTimeFactory[K any]() ottl.Factory[K] { @@ -26,14 +27,20 @@ func createTimeFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ot return nil, fmt.Errorf("TimeFactory args must be of type *TimeArguments[K]") } - return Time(args.Time, args.Format) + return Time(args.Time, args.Format, args.Location) } -func Time[K any](inputTime ottl.StringGetter[K], format string) (ottl.ExprFunc[K], error) { +func Time[K any](inputTime ottl.StringGetter[K], format string, location ottl.Optional[string]) (ottl.ExprFunc[K], error) { if format == "" { return nil, fmt.Errorf("format cannot be nil") } - loc, err := timeutils.GetLocation(nil, &format) + var defaultLocation *string + if !location.IsEmpty() { + l := location.Get() + defaultLocation = &l + } + + loc, err := timeutils.GetLocation(defaultLocation, &format) if err != nil { return nil, err } diff --git a/pkg/ottl/ottlfuncs/func_time_test.go b/pkg/ottl/ottlfuncs/func_time_test.go index 5f373026e9a7..41e62edaae04 100644 --- a/pkg/ottl/ottlfuncs/func_time_test.go +++ b/pkg/ottl/ottlfuncs/func_time_test.go @@ -15,11 +15,15 @@ import ( ) func Test_Time(t *testing.T) { + locationAmericaNewYork, _ := time.LoadLocation("America/New_York") + locationAsiaShanghai, _ := time.LoadLocation("Asia/Shanghai") + tests := []struct { name string time ottl.StringGetter[any] format string expected time.Time + location string }{ { name: "simple short form", @@ -151,10 +155,47 @@ func Test_Time(t *testing.T) { format: "%Y/%m/%d", expected: time.Date(2022, 01, 01, 0, 0, 0, 0, time.Local), }, + { + name: "with location - America", + time: &ottl.StandardStringGetter[any]{ + Getter: func(_ context.Context, _ any) (any, error) { + return "2023-05-26 12:34:56", nil + }, + }, + format: "%Y-%m-%d %H:%M:%S", + location: "America/New_York", + expected: time.Date(2023, 5, 26, 12, 34, 56, 0, locationAmericaNewYork), + }, + { + name: "with location - Asia", + time: &ottl.StandardStringGetter[any]{ + Getter: func(_ context.Context, _ any) (any, error) { + return "2023-05-26 12:34:56", nil + }, + }, + format: "%Y-%m-%d %H:%M:%S", + location: "Asia/Shanghai", + expected: time.Date(2023, 5, 26, 12, 34, 56, 0, locationAsiaShanghai), + }, + { + name: "RFC 3339 
in custom format before 2000, ignore default location", + time: &ottl.StandardStringGetter[any]{ + Getter: func(_ context.Context, _ any) (any, error) { + return "1986-10-01T00:17:33 MST", nil + }, + }, + location: "Asia/Shanghai", + format: "%Y-%m-%dT%H:%M:%S %Z", + expected: time.Date(1986, 10, 01, 00, 17, 33, 00, time.FixedZone("MST", -7*60*60)), + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - exprFunc, err := Time(tt.time, tt.format) + var locOptional ottl.Optional[string] + if tt.location != "" { + locOptional = ottl.NewTestingOptional(tt.location) + } + exprFunc, err := Time(tt.time, tt.format, locOptional) assert.NoError(t, err) result, err := exprFunc(nil, nil) assert.NoError(t, err) @@ -193,7 +234,8 @@ func Test_TimeError(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - exprFunc, err := Time[any](tt.time, tt.format) + var locOptional ottl.Optional[string] + exprFunc, err := Time[any](tt.time, tt.format, locOptional) require.NoError(t, err) _, err = exprFunc(context.Background(), nil) assert.ErrorContains(t, err, tt.expectedError) @@ -207,6 +249,7 @@ func Test_TimeFormatError(t *testing.T) { time ottl.StringGetter[any] format string expectedError string + location string }{ { name: "invalid short with no format", @@ -218,10 +261,25 @@ func Test_TimeFormatError(t *testing.T) { format: "", expectedError: "format cannot be nil", }, + { + name: "with unknown location", + time: &ottl.StandardStringGetter[any]{ + Getter: func(_ context.Context, _ any) (any, error) { + return "2023-05-26 12:34:56", nil + }, + }, + format: "%Y-%m-%d %H:%M:%S", + location: "Jupiter/Ganymede", + expectedError: "unknown time zone Jupiter/Ganymede", + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - _, err := Time[any](tt.time, tt.format) + var locOptional ottl.Optional[string] + if tt.location != "" { + locOptional = ottl.NewTestingOptional(tt.location) + } + _, err := Time[any](tt.time, tt.format, locOptional) assert.ErrorContains(t, err, tt.expectedError) }) } From a4b0e5928e46c23a0ab42e2a72d468156d62051b Mon Sep 17 00:00:00 2001 From: lkwronski <45148751+lkwronski@users.noreply.github.com> Date: Fri, 10 May 2024 00:06:25 +0200 Subject: [PATCH 16/55] [processor/transform] Add common where clause (#31491) **Description:** Add global conditions with where clause **Link to tracking Issue:** Fixes #27830 **Testing:** Unit tests **Documentation:** TODO ~~The main objective is to extend the `ContextStatements` struct by adding a new `Conditions` parameter. By introducing `Conditions` to `ContextStatements`, we can now apply a global condition to all related statements in `WithStatementSequenceGlobalConditions` function.~~ Thanks in advance for your feedback! If this changes will be fine, I will add common where clause into another context `span`, `metrics`. 
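For illustration, a minimal configuration sketch of the new field (mirroring the `with_conditions` testdata added later in this patch; the surrounding collector pipeline wiring is assumed), where a global condition gates every statement in the block:

```yaml
transform:
  trace_statements:
    - context: span
      conditions:
        - attributes["http.path"] == "/animal"
      statements:
        - set(name, "bear")
```

With this config, `set(name, "bear")` only runs for spans whose `http.path` attribute equals `/animal`; spans that do not meet the condition pass through unmodified.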
--- ...onski.issue-27830-common-where-clause.yaml | 27 + internal/filter/expr/matcher.go | 10 + internal/filter/filterottl/filter.go | 17 + internal/filter/filterottl/filter_test.go | 51 ++ internal/filter/filterottl/functions.go | 5 + processor/transformprocessor/README.md | 32 +- processor/transformprocessor/config_test.go | 33 + processor/transformprocessor/factory_test.go | 564 ++++++++++++++++++ processor/transformprocessor/go.mod | 4 + processor/transformprocessor/go.sum | 2 + .../internal/common/config.go | 1 + .../internal/common/logs.go | 17 +- .../internal/common/metrics.go | 56 +- .../internal/common/processor.go | 77 ++- .../internal/common/traces.go | 32 +- .../transformprocessor/testdata/config.yaml | 20 + 16 files changed, 923 insertions(+), 25 deletions(-) create mode 100755 .chloggen/lkwronski.issue-27830-common-where-clause.yaml diff --git a/.chloggen/lkwronski.issue-27830-common-where-clause.yaml b/.chloggen/lkwronski.issue-27830-common-where-clause.yaml new file mode 100755 index 000000000000..d35728bcbe88 --- /dev/null +++ b/.chloggen/lkwronski.issue-27830-common-where-clause.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: processor/transform + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Allow common where clause + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [27830] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [user] diff --git a/internal/filter/expr/matcher.go b/internal/filter/expr/matcher.go index f73e9f8e8a8c..037b4888d46a 100644 --- a/internal/filter/expr/matcher.go +++ b/internal/filter/expr/matcher.go @@ -25,6 +25,16 @@ func Not[K any](matcher BoolExpr[K]) BoolExpr[K] { return notMatcher[K]{matcher: matcher} } +type alwaysTrueMatcher[K any] struct{} + +func (alm alwaysTrueMatcher[K]) Eval(_ context.Context, _ K) (bool, error) { + return true, nil +} + +func AlwaysTrue[K any]() BoolExpr[K] { + return alwaysTrueMatcher[K]{} +} + type orMatcher[K any] struct { matchers []BoolExpr[K] } diff --git a/internal/filter/filterottl/filter.go b/internal/filter/filterottl/filter.go index 6324c8a35bd9..e4dad6ee9359 100644 --- a/internal/filter/filterottl/filter.go +++ b/internal/filter/filterottl/filter.go @@ -12,6 +12,7 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent" ) @@ -111,3 +112,19 @@ func NewBoolExprForResource(conditions []string, functions map[string]ottl.Facto c := ottlresource.NewConditionSequence(statements, set, ottlresource.WithConditionSequenceErrorMode(errorMode)) return &c, nil } + +// NewBoolExprForScope creates a BoolExpr[ottlscope.TransformContext] that will return true if any of the given OTTL conditions evaluate to true. +// The passed in functions should use the ottlresource.TransformContext. 
+// If a function named `match` is not present in the function map it will be added automatically so that parsing works as expected +func NewBoolExprForScope(conditions []string, functions map[string]ottl.Factory[ottlscope.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottlscope.TransformContext], error) { + parser, err := ottlscope.NewParser(functions, set) + if err != nil { + return nil, err + } + statements, err := parser.ParseConditions(conditions) + if err != nil { + return nil, err + } + c := ottlscope.NewConditionSequence(statements, set, ottlscope.WithConditionSequenceErrorMode(errorMode)) + return &c, nil +} diff --git a/internal/filter/filterottl/filter_test.go b/internal/filter/filterottl/filter_test.go index 8e6a65ebc4c8..d198f8924ec5 100644 --- a/internal/filter/filterottl/filter_test.go +++ b/internal/filter/filterottl/filter_test.go @@ -15,6 +15,7 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent" ) @@ -270,3 +271,53 @@ func Test_NewBoolExprForResource(t *testing.T) { }) } } + +func Test_NewBoolExprForScope(t *testing.T) { + tests := []struct { + name string + conditions []string + expectedResult bool + }{ + { + name: "basic", + conditions: []string{ + "true == true", + }, + expectedResult: true, + }, + { + name: "multiple conditions resulting true", + conditions: []string{ + "false == true", + "true == true", + }, + expectedResult: true, + }, + { + name: "multiple conditions resulting false", + conditions: []string{ + "false == true", + "true == false", + }, + expectedResult: false, + }, + { + name: "With Converter", + conditions: []string{ + `IsMatch("test", "pass")`, + }, + expectedResult: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + resBoolExpr, err := NewBoolExprForScope(tt.conditions, StandardScopeFuncs(), ottl.PropagateError, componenttest.NewNopTelemetrySettings()) + assert.NoError(t, err) + assert.NotNil(t, resBoolExpr) + result, err := resBoolExpr.Eval(context.Background(), ottlscope.TransformContext{}) + assert.NoError(t, err) + assert.Equal(t, tt.expectedResult, result) + }) + } +} diff --git a/internal/filter/filterottl/functions.go b/internal/filter/filterottl/functions.go index c86ee64f89ae..c3ce56ce4abf 100644 --- a/internal/filter/filterottl/functions.go +++ b/internal/filter/filterottl/functions.go @@ -14,6 +14,7 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" @@ -40,6 +41,10 @@ func StandardDataPointFuncs() 
map[string]ottl.Factory[ottldatapoint.TransformCon return ottlfuncs.StandardConverters[ottldatapoint.TransformContext]() } +func StandardScopeFuncs() map[string]ottl.Factory[ottlscope.TransformContext] { + return ottlfuncs.StandardConverters[ottlscope.TransformContext]() +} + func StandardLogFuncs() map[string]ottl.Factory[ottllog.TransformContext] { return ottlfuncs.StandardConverters[ottllog.TransformContext]() } diff --git a/processor/transformprocessor/README.md b/processor/transformprocessor/README.md index a347c12bebad..acb4ebd451f8 100644 --- a/processor/transformprocessor/README.md +++ b/processor/transformprocessor/README.md @@ -14,8 +14,8 @@ The transform processor modifies telemetry based on configuration using the [OpenTelemetry Transformation Language](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl). -For each signal type, the processor takes a list of statements associated to a [Context type](#contexts) and executes the statements against the incoming telemetry in the order specified in the config. -Each statement can access and transform telemetry using functions and allow the use of a condition to help decide whether the function should be executed. +For each signal type, the processor takes a list of conditions and statements associated to a [Context type](#contexts) and executes the conditions and statements against the incoming telemetry in the order specified in the config. +Each condition and statement can access and transform telemetry using functions and allow the use of a condition to help decide whether the function should be executed. - [Config](#config) - [Grammar](#grammar) @@ -28,8 +28,8 @@ Each statement can access and transform telemetry using functions and allow the The transform processor allows configuring multiple context statements for traces, metrics, and logs. The value of `context` specifies which [OTTL Context](#contexts) to use when interpreting the associated statements. -The statement strings, which must be OTTL compatible, will be passed to the OTTL and interpreted using the associated context. -Each context will be processed in the order specified and each statement for a context will be executed in the order specified. +The conditions and statement strings, which must be OTTL compatible, will be passed to the OTTL and interpreted using the associated context. The conditions string should contain a string with a WHERE clause body without the `where` keyword at the beginning. +Each context will be processed in the order specified and each condition and statement for a context will be executed in the order specified. Conditions are executed first, if a context doesn't meet the conditions, the associated statement will be skipped. The transform processor also allows configuring an optional field, `error_mode`, which will determine how the processor reacts to errors that occur while processing a statement. @@ -46,6 +46,9 @@ transform: error_mode: ignore _statements: - context: string + conditions: + - string + - string statements: - string - string @@ -67,6 +70,27 @@ Valid values for `context` are: | metric_statements | `resource`, `scope`, `metric`, and `datapoint` | | log_statements | `resource`, `scope`, and `log` | +`conditions` is a list comprised of multiple where clauses, which will be processed as global conditions for the accompanying set of statements. 
+ +```yaml +transform: + error_mode: ignore + metric_statements: + - context: metric + conditions: + - type == METRIC_DATA_TYPE_SUM + statements: + - set(description, "Sum") + + log_statements: + - context: log + conditions: + - IsMap(body) and body["object"] != nil + statements: + - set(body, attributes["http.route"]) +``` + + ### Example The example takes advantage of context efficiency by grouping transformations with the context which it intends to transform. diff --git a/processor/transformprocessor/config_test.go b/processor/transformprocessor/config_test.go index 256f69b9b6b7..1048ba19c36b 100644 --- a/processor/transformprocessor/config_test.go +++ b/processor/transformprocessor/config_test.go @@ -76,6 +76,39 @@ func TestLoadConfig(t *testing.T) { }, }, }, + { + id: component.NewIDWithName(metadata.Type, "with_conditions"), + expected: &Config{ + ErrorMode: ottl.PropagateError, + TraceStatements: []common.ContextStatements{ + { + Context: "span", + Conditions: []string{`attributes["http.path"] == "/animal"`}, + Statements: []string{ + `set(name, "bear")`, + }, + }, + }, + MetricStatements: []common.ContextStatements{ + { + Context: "datapoint", + Conditions: []string{`attributes["http.path"] == "/animal"`}, + Statements: []string{ + `set(metric.name, "bear")`, + }, + }, + }, + LogStatements: []common.ContextStatements{ + { + Context: "log", + Conditions: []string{`attributes["http.path"] == "/animal"`}, + Statements: []string{ + `set(body, "bear")`, + }, + }, + }, + }, + }, { id: component.NewIDWithName(metadata.Type, "ignore_errors"), expected: &Config{ diff --git a/processor/transformprocessor/factory_test.go b/processor/transformprocessor/factory_test.go index c9cc89ca2459..b56724a992ee 100644 --- a/processor/transformprocessor/factory_test.go +++ b/processor/transformprocessor/factory_test.go @@ -189,3 +189,567 @@ func TestFactoryCreateLogsProcessor_InvalidActions(t *testing.T) { assert.Error(t, err) assert.Nil(t, ap) } + +func TestFactoryCreateLogProcessor(t *testing.T) { + tests := []struct { + name string + conditions []string + statements []string + want func(plog.Logs) + createLogs func() plog.Logs + }{ + { + name: "create logs processor and pass log context is passed with a global condition that meets the specified condition", + conditions: []string{`body == "operationA"`}, + statements: []string{`set(attributes["test"], "pass")`}, + want: func(td plog.Logs) { + newLog := td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0) + newLog.Attributes().PutStr("test", "pass") + }, + createLogs: func() plog.Logs { + ld := plog.NewLogs() + log := ld.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords().AppendEmpty() + log.Body().SetStr("operationA") + return ld + }, + }, + { + name: "create logs processor and pass log context is passed with a statement condition that meets the specified condition", + conditions: []string{}, + statements: []string{`set(attributes["test"], "pass") where body == "operationA"`}, + want: func(td plog.Logs) { + newLog := td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0) + newLog.Attributes().PutStr("test", "pass") + }, + createLogs: func() plog.Logs { + ld := plog.NewLogs() + log := ld.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords().AppendEmpty() + log.Body().SetStr("operationA") + return ld + }, + }, + { + name: "create logs processor and pass log context is passed with a global condition that fails the specified condition", + conditions: []string{`body == "operationB"`}, + statements: 
[]string{`set(attributes["test"], "pass")`}, + want: func(_ plog.Logs) {}, + createLogs: func() plog.Logs { + ld := plog.NewLogs() + log := ld.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords().AppendEmpty() + log.Body().SetStr("operationA") + return ld + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + oCfg := cfg.(*Config) + oCfg.ErrorMode = ottl.IgnoreError + oCfg.LogStatements = []common.ContextStatements{ + { + Context: "log", + Conditions: tt.conditions, + Statements: tt.statements, + }, + } + lp, err := factory.CreateLogsProcessor(context.Background(), processortest.NewNopCreateSettings(), cfg, consumertest.NewNop()) + assert.NotNil(t, lp) + assert.NoError(t, err) + + ld := tt.createLogs() + + err = lp.ConsumeLogs(context.Background(), ld) + assert.NoError(t, err) + + exLd := tt.createLogs() + tt.want(exLd) + + assert.Equal(t, exLd, ld) + }) + } +} + +func TestFactoryCreateResourceProcessor(t *testing.T) { + tests := []struct { + name string + conditions []string + statements []string + want func(plog.Logs) + createLogs func() plog.Logs + }{ + { + name: "create logs processor and pass resource context is passed with a global condition that meets the specified condition", + conditions: []string{`attributes["test"] == "foo"`}, + statements: []string{`set(attributes["test"], "pass")`}, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).Resource().Attributes().PutStr("test", "pass") + }, + createLogs: func() plog.Logs { + ld := plog.NewLogs() + ld.ResourceLogs().AppendEmpty().Resource().Attributes().PutStr("test", "foo") + return ld + }, + }, + { + name: "create logs processor and pass resource context is passed with a statement condition that meets the specified condition", + conditions: []string{}, + statements: []string{`set(attributes["test"], "pass") where attributes["test"] == "foo"`}, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).Resource().Attributes().PutStr("test", "pass") + }, + createLogs: func() plog.Logs { + ld := plog.NewLogs() + ld.ResourceLogs().AppendEmpty().Resource().Attributes().PutStr("test", "foo") + return ld + }, + }, + { + name: "create logs processor and pass resource context is passed with a global condition that fails the specified condition", + conditions: []string{`attributes["test"] == "wrong"`}, + statements: []string{`set(attributes["test"], "pass")`}, + want: func(_ plog.Logs) {}, + createLogs: func() plog.Logs { + ld := plog.NewLogs() + ld.ResourceLogs().AppendEmpty().Resource().Attributes().PutStr("test", "foo") + return ld + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + oCfg := cfg.(*Config) + oCfg.ErrorMode = ottl.IgnoreError + oCfg.LogStatements = []common.ContextStatements{ + { + Context: "resource", + Conditions: tt.conditions, + Statements: tt.statements, + }, + } + lp, err := factory.CreateLogsProcessor(context.Background(), processortest.NewNopCreateSettings(), cfg, consumertest.NewNop()) + assert.NotNil(t, lp) + assert.NoError(t, err) + + ld := tt.createLogs() + + err = lp.ConsumeLogs(context.Background(), ld) + assert.NoError(t, err) + + exLd := tt.createLogs() + tt.want(exLd) + + assert.Equal(t, exLd, ld) + }) + } +} + +func TestFactoryCreateScopeProcessor(t *testing.T) { + tests := []struct { + name string + conditions []string + statements []string + want func(plog.Logs) + createLogs func() plog.Logs + }{ + { + 
name: "create logs processor and pass scope context is passed with a global condition that meets the specified condition", + conditions: []string{`attributes["test"] == "foo"`}, + statements: []string{`set(attributes["test"], "pass")`}, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).ScopeLogs().At(0).Scope().Attributes().PutStr("test", "pass") + }, + createLogs: func() plog.Logs { + ld := plog.NewLogs() + ld.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().Scope().Attributes().PutStr("test", "foo") + return ld + }, + }, + { + name: "create logs processor and pass scope context is passed with a statement condition that meets the specified condition", + conditions: []string{}, + statements: []string{`set(attributes["test"], "pass") where attributes["test"] == "foo"`}, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).ScopeLogs().At(0).Scope().Attributes().PutStr("test", "pass") + }, + createLogs: func() plog.Logs { + ld := plog.NewLogs() + ld.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().Scope().Attributes().PutStr("test", "foo") + return ld + }, + }, + { + name: "create logs processor and pass scope context is passed with a global condition that fails the specified condition", + conditions: []string{`attributes["test"] == "wrong"`}, + statements: []string{`set(attributes["test"], "pass")`}, + want: func(_ plog.Logs) {}, + createLogs: func() plog.Logs { + ld := plog.NewLogs() + ld.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().Scope().Attributes().PutStr("test", "foo") + return ld + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + oCfg := cfg.(*Config) + oCfg.ErrorMode = ottl.IgnoreError + oCfg.LogStatements = []common.ContextStatements{ + { + Context: "scope", + Conditions: tt.conditions, + Statements: tt.statements, + }, + } + lp, err := factory.CreateLogsProcessor(context.Background(), processortest.NewNopCreateSettings(), cfg, consumertest.NewNop()) + assert.NotNil(t, lp) + assert.NoError(t, err) + + ld := tt.createLogs() + + err = lp.ConsumeLogs(context.Background(), ld) + assert.NoError(t, err) + + exLd := tt.createLogs() + tt.want(exLd) + + assert.Equal(t, exLd, ld) + }) + } +} + +func TestFactoryCreateMetricProcessor(t *testing.T) { + tests := []struct { + name string + conditions []string + statements []string + want func(pmetric.Metrics) + createMetrics func() pmetric.Metrics + }{ + { + name: "create metrics processor and pass metric context is passed with a global condition that meets the specified condition", + conditions: []string{`name == "operationA"`}, + statements: []string{`set(description, "Sum")`}, + want: func(td pmetric.Metrics) { + newMetric := td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0) + newMetric.SetDescription("Sum") + }, + createMetrics: func() pmetric.Metrics { + td := pmetric.NewMetrics() + metric := td.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() + metric.SetName("operationA") + return td + }, + }, + { + name: "create metrics processor and pass metric context is passed with a statement condition that meets the specified condition", + conditions: []string{}, + statements: []string{`set(description, "Sum") where name == "operationA"`}, + want: func(td pmetric.Metrics) { + newMetric := td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0) + newMetric.SetDescription("Sum") + }, + createMetrics: func() pmetric.Metrics { + td := pmetric.NewMetrics() + metric := 
td.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() + metric.SetName("operationA") + return td + }, + }, + { + name: "create metrics processor and pass metric context is passed with a global condition that fails the specified condition", + conditions: []string{`name == "operationA"`}, + statements: []string{`set(description, "Sum")`}, + want: func(_ pmetric.Metrics) {}, + createMetrics: func() pmetric.Metrics { + td := pmetric.NewMetrics() + metric := td.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() + metric.SetName("operationB") + return td + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + oCfg := cfg.(*Config) + oCfg.ErrorMode = ottl.IgnoreError + oCfg.MetricStatements = []common.ContextStatements{ + { + Context: "metric", + Conditions: tt.conditions, + Statements: tt.statements, + }, + } + mp, err := factory.CreateMetricsProcessor(context.Background(), processortest.NewNopCreateSettings(), cfg, consumertest.NewNop()) + assert.NotNil(t, mp) + assert.NoError(t, err) + + td := tt.createMetrics() + + err = mp.ConsumeMetrics(context.Background(), td) + assert.NoError(t, err) + + exTd := tt.createMetrics() + tt.want(exTd) + + assert.Equal(t, exTd, td) + }) + } +} + +func TestFactoryCreateDataPointProcessor(t *testing.T) { + tests := []struct { + name string + conditions []string + statements []string + want func(pmetric.Metrics) + createMetrics func() pmetric.Metrics + }{ + { + name: "create metrics processor and pass datapoint context is passed with a global condition that meets the specified condition", + conditions: []string{`metric.name == "operationA"`}, + statements: []string{`set(attributes["test"], "pass")`}, + want: func(td pmetric.Metrics) { + newMetric := td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0) + newMetric.SetEmptySum().DataPoints().AppendEmpty().Attributes().PutStr("test", "pass") + }, + createMetrics: func() pmetric.Metrics { + td := pmetric.NewMetrics() + metric := td.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() + metric.SetEmptySum().DataPoints().AppendEmpty() + metric.SetName("operationA") + return td + }, + }, + { + name: "create metrics processor and pass datapoint context is passed with a statement condition that meets the specified condition", + conditions: []string{}, + statements: []string{`set(attributes["test"], "pass") where metric.name == "operationA"`}, + want: func(td pmetric.Metrics) { + newMetric := td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0) + newMetric.SetEmptySum().DataPoints().AppendEmpty().Attributes().PutStr("test", "pass") + }, + createMetrics: func() pmetric.Metrics { + td := pmetric.NewMetrics() + metric := td.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() + metric.SetEmptySum().DataPoints().AppendEmpty() + metric.SetName("operationA") + return td + }, + }, + { + name: "create metrics processor and pass datapoint context is passed with a global condition that fails the specified condition", + conditions: []string{`metric.name == "operationB"`}, + statements: []string{`set(attributes["test"], "pass")`}, + want: func(_ pmetric.Metrics) {}, + createMetrics: func() pmetric.Metrics { + td := pmetric.NewMetrics() + metric := td.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() + metric.SetEmptySum().DataPoints().AppendEmpty() + 
metric.SetName("operationA") + return td + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + oCfg := cfg.(*Config) + oCfg.ErrorMode = ottl.IgnoreError + oCfg.MetricStatements = []common.ContextStatements{ + { + Context: "datapoint", + Conditions: tt.conditions, + Statements: tt.statements, + }, + } + mp, err := factory.CreateMetricsProcessor(context.Background(), processortest.NewNopCreateSettings(), cfg, consumertest.NewNop()) + assert.NotNil(t, mp) + assert.NoError(t, err) + + td := tt.createMetrics() + + err = mp.ConsumeMetrics(context.Background(), td) + assert.NoError(t, err) + + exTd := tt.createMetrics() + tt.want(exTd) + + assert.Equal(t, exTd, td) + }) + } +} + +func TestFactoryCreateSpanProcessor(t *testing.T) { + tests := []struct { + name string + conditions []string + statements []string + want func(ptrace.Traces) + createTraces func() ptrace.Traces + }{ + { + name: "create traces processor and pass span context is passed with a global condition that meets the specified condition", + conditions: []string{`name == "operationA"`}, + statements: []string{`set(attributes["test"], "pass")`}, + want: func(td ptrace.Traces) { + newSpan := td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0) + newSpan.Attributes().PutStr("test", "pass") + }, + createTraces: func() ptrace.Traces { + td := ptrace.NewTraces() + span := td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty() + span.SetName("operationA") + return td + }, + }, + { + name: "create traces processor and pass span context is passed with a statement condition that meets the specified condition", + conditions: []string{}, + statements: []string{`set(attributes["test"], "pass") where name == "operationA"`}, + want: func(td ptrace.Traces) { + newSpan := td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0) + newSpan.Attributes().PutStr("test", "pass") + }, + createTraces: func() ptrace.Traces { + td := ptrace.NewTraces() + span := td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty() + span.SetName("operationA") + return td + }, + }, + { + name: "create traces processor and pass span context is passed with a global condition that fails the specified condition", + conditions: []string{`name == "operationB"`}, + statements: []string{`set(attributes["test"], "pass")`}, + want: func(_ ptrace.Traces) {}, + createTraces: func() ptrace.Traces { + td := ptrace.NewTraces() + td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty() + return td + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + oCfg := cfg.(*Config) + oCfg.ErrorMode = ottl.IgnoreError + oCfg.TraceStatements = []common.ContextStatements{ + { + Context: "span", + Conditions: tt.conditions, + Statements: tt.statements, + }, + } + mp, err := factory.CreateTracesProcessor(context.Background(), processortest.NewNopCreateSettings(), cfg, consumertest.NewNop()) + assert.NotNil(t, mp) + assert.NoError(t, err) + + td := tt.createTraces() + + err = mp.ConsumeTraces(context.Background(), td) + assert.NoError(t, err) + + exTd := tt.createTraces() + tt.want(exTd) + + assert.Equal(t, exTd, td) + }) + } +} + +func TestFactoryCreateSpanEventProcessor(t *testing.T) { + tests := []struct { + name string + conditions []string + statements []string + want func(ptrace.Traces) + createTraces func() ptrace.Traces + }{ + 
{ + name: "create traces processor and pass spanevent context is passed with a global condition that meets the specified condition", + conditions: []string{`name == "eventA"`}, + statements: []string{`set(attributes["test"], "pass")`}, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Events().At(0).Attributes().PutStr("test", "pass") + }, + createTraces: func() ptrace.Traces { + td := ptrace.NewTraces() + event := td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty().Events().AppendEmpty() + event.SetName("eventA") + return td + }, + }, + { + name: "create traces processor and pass spanevent context is passed with a statement condition that meets the specified condition", + conditions: []string{}, + statements: []string{`set(attributes["test"], "pass") where name == "eventA"`}, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Events().At(0).Attributes().PutStr("test", "pass") + }, + createTraces: func() ptrace.Traces { + td := ptrace.NewTraces() + event := td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty().Events().AppendEmpty() + event.SetName("eventA") + return td + }, + }, + { + name: "create traces processor and pass spanevent context is passed with a global condition that fails the specified condition", + conditions: []string{`name == "eventB"`}, + statements: []string{`set(attributes["test"], "pass")`}, + want: func(_ ptrace.Traces) {}, + createTraces: func() ptrace.Traces { + td := ptrace.NewTraces() + event := td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty().Events().AppendEmpty() + event.SetName("eventA") + return td + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + oCfg := cfg.(*Config) + oCfg.ErrorMode = ottl.IgnoreError + oCfg.TraceStatements = []common.ContextStatements{ + { + Context: "spanevent", + Conditions: tt.conditions, + Statements: tt.statements, + }, + } + mp, err := factory.CreateTracesProcessor(context.Background(), processortest.NewNopCreateSettings(), cfg, consumertest.NewNop()) + assert.NotNil(t, mp) + assert.NoError(t, err) + + td := tt.createTraces() + + err = mp.ConsumeTraces(context.Background(), td) + assert.NoError(t, err) + + exTd := tt.createTraces() + tt.want(exTd) + + assert.Equal(t, exTd, td) + }) + } +} diff --git a/processor/transformprocessor/go.mod b/processor/transformprocessor/go.mod index f8eaac7eba42..7e817f252ea1 100644 --- a/processor/transformprocessor/go.mod +++ b/processor/transformprocessor/go.mod @@ -4,6 +4,7 @@ go 1.21.0 require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.100.0 + github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.100.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.100.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.100.0 github.com/stretchr/testify v1.9.0 @@ -32,6 +33,7 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/google/uuid v1.6.0 // indirect github.com/hashicorp/go-version v1.6.0 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/iancoleman/strcase v0.3.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/knadh/koanf/maps v0.1.1 // indirect @@ -82,3 +84,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest replace 
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden => ../../pkg/golden replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/common => ../../internal/common + +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter => ../../internal/filter diff --git a/processor/transformprocessor/go.sum b/processor/transformprocessor/go.sum index 383306744162..1f9536f9a6c3 100644 --- a/processor/transformprocessor/go.sum +++ b/processor/transformprocessor/go.sum @@ -29,6 +29,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= diff --git a/processor/transformprocessor/internal/common/config.go b/processor/transformprocessor/internal/common/config.go index 2747ac11d4db..c0f293457329 100644 --- a/processor/transformprocessor/internal/common/config.go +++ b/processor/transformprocessor/internal/common/config.go @@ -33,5 +33,6 @@ func (c *ContextID) UnmarshalText(text []byte) error { type ContextStatements struct { Context ContextID `mapstructure:"context"` + Conditions []string `mapstructure:"conditions"` Statements []string `mapstructure:"statements"` } diff --git a/processor/transformprocessor/internal/common/logs.go b/processor/transformprocessor/internal/common/logs.go index 83f2e81e9bce..fb350bc22137 100644 --- a/processor/transformprocessor/internal/common/logs.go +++ b/processor/transformprocessor/internal/common/logs.go @@ -10,6 +10,8 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/pdata/plog" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource" @@ -20,6 +22,7 @@ var _ consumer.Logs = &logStatements{} type logStatements struct { ottl.StatementSequence[ottllog.TransformContext] + expr.BoolExpr[ottllog.TransformContext] } func (l logStatements) Capabilities() consumer.Capabilities { @@ -36,10 +39,16 @@ func (l logStatements) ConsumeLogs(ctx context.Context, ld plog.Logs) error { logs := slogs.LogRecords() for k := 0; k < logs.Len(); k++ { tCtx := ottllog.NewTransformContext(logs.At(k), slogs.Scope(), rlogs.Resource()) - err := l.Execute(ctx, tCtx) + condition, err := l.BoolExpr.Eval(ctx, tCtx) if err != nil { return err } + if condition { + err := l.Execute(ctx, tCtx) + if err != nil { + return err + } + } } } } @@ -105,8 +114,12 @@ func (pc LogParserCollection) ParseContextStatements(contextStatements ContextSt if err != nil { return nil, err } + globalExpr, errGlobalBoolExpr := 
parseGlobalExpr(filterottl.NewBoolExprForLog, contextStatements.Conditions, pc.parserCollection, filterottl.StandardLogFuncs()) + if errGlobalBoolExpr != nil { + return nil, errGlobalBoolExpr + } lStatements := ottllog.NewStatementSequence(parsedStatements, pc.settings, ottllog.WithStatementSequenceErrorMode(pc.errorMode)) - return logStatements{lStatements}, nil + return logStatements{lStatements, globalExpr}, nil default: statements, err := pc.parseCommonContextStatements(contextStatements) if err != nil { diff --git a/processor/transformprocessor/internal/common/metrics.go b/processor/transformprocessor/internal/common/metrics.go index 602245ac0015..dd63e820487d 100644 --- a/processor/transformprocessor/internal/common/metrics.go +++ b/processor/transformprocessor/internal/common/metrics.go @@ -11,6 +11,8 @@ import ( "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric" @@ -22,6 +24,7 @@ var _ consumer.Metrics = &metricStatements{} type metricStatements struct { ottl.StatementSequence[ottlmetric.TransformContext] + expr.BoolExpr[ottlmetric.TransformContext] } func (m metricStatements) Capabilities() consumer.Capabilities { @@ -38,10 +41,16 @@ func (m metricStatements) ConsumeMetrics(ctx context.Context, md pmetric.Metrics metrics := smetrics.Metrics() for k := 0; k < metrics.Len(); k++ { tCtx := ottlmetric.NewTransformContext(metrics.At(k), smetrics.Metrics(), smetrics.Scope(), rmetrics.Resource()) - err := m.Execute(ctx, tCtx) + condition, err := m.BoolExpr.Eval(ctx, tCtx) if err != nil { return err } + if condition { + err := m.Execute(ctx, tCtx) + if err != nil { + return err + } + } } } } @@ -52,6 +61,7 @@ var _ consumer.Metrics = &dataPointStatements{} type dataPointStatements struct { ottl.StatementSequence[ottldatapoint.TransformContext] + expr.BoolExpr[ottldatapoint.TransformContext] } func (d dataPointStatements) Capabilities() consumer.Capabilities { @@ -94,10 +104,16 @@ func (d dataPointStatements) ConsumeMetrics(ctx context.Context, md pmetric.Metr func (d dataPointStatements) handleNumberDataPoints(ctx context.Context, dps pmetric.NumberDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource) error { for i := 0; i < dps.Len(); i++ { tCtx := ottldatapoint.NewTransformContext(dps.At(i), metric, metrics, is, resource) - err := d.Execute(ctx, tCtx) + condition, err := d.BoolExpr.Eval(ctx, tCtx) if err != nil { return err } + if condition { + err := d.Execute(ctx, tCtx) + if err != nil { + return err + } + } } return nil } @@ -105,10 +121,16 @@ func (d dataPointStatements) handleNumberDataPoints(ctx context.Context, dps pme func (d dataPointStatements) handleHistogramDataPoints(ctx context.Context, dps pmetric.HistogramDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource) error { for i := 0; i < dps.Len(); i++ { tCtx := ottldatapoint.NewTransformContext(dps.At(i), metric, metrics, is, resource) - err := d.Execute(ctx, tCtx) + condition, err := d.BoolExpr.Eval(ctx, tCtx) if err != nil { 
return err } + if condition { + err := d.Execute(ctx, tCtx) + if err != nil { + return err + } + } } return nil } @@ -116,10 +138,16 @@ func (d dataPointStatements) handleHistogramDataPoints(ctx context.Context, dps func (d dataPointStatements) handleExponetialHistogramDataPoints(ctx context.Context, dps pmetric.ExponentialHistogramDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource) error { for i := 0; i < dps.Len(); i++ { tCtx := ottldatapoint.NewTransformContext(dps.At(i), metric, metrics, is, resource) - err := d.Execute(ctx, tCtx) + condition, err := d.BoolExpr.Eval(ctx, tCtx) if err != nil { return err } + if condition { + err := d.Execute(ctx, tCtx) + if err != nil { + return err + } + } } return nil } @@ -127,10 +155,16 @@ func (d dataPointStatements) handleExponetialHistogramDataPoints(ctx context.Con func (d dataPointStatements) handleSummaryDataPoints(ctx context.Context, dps pmetric.SummaryDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource) error { for i := 0; i < dps.Len(); i++ { tCtx := ottldatapoint.NewTransformContext(dps.At(i), metric, metrics, is, resource) - err := d.Execute(ctx, tCtx) + condition, err := d.BoolExpr.Eval(ctx, tCtx) if err != nil { return err } + if condition { + err := d.Execute(ctx, tCtx) + if err != nil { + return err + } + } } return nil } @@ -206,15 +240,23 @@ func (pc MetricParserCollection) ParseContextStatements(contextStatements Contex if err != nil { return nil, err } + globalExpr, errGlobalBoolExpr := parseGlobalExpr(filterottl.NewBoolExprForMetric, contextStatements.Conditions, pc.parserCollection, filterottl.StandardMetricFuncs()) + if errGlobalBoolExpr != nil { + return nil, errGlobalBoolExpr + } mStatements := ottlmetric.NewStatementSequence(parseStatements, pc.settings, ottlmetric.WithStatementSequenceErrorMode(pc.errorMode)) - return metricStatements{mStatements}, nil + return metricStatements{mStatements, globalExpr}, nil case DataPoint: parsedStatements, err := pc.dataPointParser.ParseStatements(contextStatements.Statements) if err != nil { return nil, err } + globalExpr, errGlobalBoolExpr := parseGlobalExpr(filterottl.NewBoolExprForDataPoint, contextStatements.Conditions, pc.parserCollection, filterottl.StandardDataPointFuncs()) + if errGlobalBoolExpr != nil { + return nil, errGlobalBoolExpr + } dpStatements := ottldatapoint.NewStatementSequence(parsedStatements, pc.settings, ottldatapoint.WithStatementSequenceErrorMode(pc.errorMode)) - return dataPointStatements{dpStatements}, nil + return dataPointStatements{dpStatements, globalExpr}, nil default: statements, err := pc.parseCommonContextStatements(contextStatements) if err != nil { diff --git a/processor/transformprocessor/internal/common/processor.go b/processor/transformprocessor/internal/common/processor.go index e3f291c4242f..77de35b81eeb 100644 --- a/processor/transformprocessor/internal/common/processor.go +++ b/processor/transformprocessor/internal/common/processor.go @@ -13,6 +13,8 @@ import ( "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/ptrace" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource" 
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope" @@ -25,6 +27,7 @@ var _ baseContext = &resourceStatements{} type resourceStatements struct { ottl.StatementSequence[ottlresource.TransformContext] + expr.BoolExpr[ottlresource.TransformContext] } func (r resourceStatements) Capabilities() consumer.Capabilities { @@ -37,10 +40,16 @@ func (r resourceStatements) ConsumeTraces(ctx context.Context, td ptrace.Traces) for i := 0; i < td.ResourceSpans().Len(); i++ { rspans := td.ResourceSpans().At(i) tCtx := ottlresource.NewTransformContext(rspans.Resource()) - err := r.Execute(ctx, tCtx) + condition, err := r.BoolExpr.Eval(ctx, tCtx) if err != nil { return err } + if condition { + err := r.Execute(ctx, tCtx) + if err != nil { + return err + } + } } return nil } @@ -49,10 +58,16 @@ func (r resourceStatements) ConsumeMetrics(ctx context.Context, md pmetric.Metri for i := 0; i < md.ResourceMetrics().Len(); i++ { rmetrics := md.ResourceMetrics().At(i) tCtx := ottlresource.NewTransformContext(rmetrics.Resource()) - err := r.Execute(ctx, tCtx) + condition, err := r.BoolExpr.Eval(ctx, tCtx) if err != nil { return err } + if condition { + err := r.Execute(ctx, tCtx) + if err != nil { + return err + } + } } return nil } @@ -61,10 +76,16 @@ func (r resourceStatements) ConsumeLogs(ctx context.Context, ld plog.Logs) error for i := 0; i < ld.ResourceLogs().Len(); i++ { rlogs := ld.ResourceLogs().At(i) tCtx := ottlresource.NewTransformContext(rlogs.Resource()) - err := r.Execute(ctx, tCtx) + condition, err := r.BoolExpr.Eval(ctx, tCtx) if err != nil { return err } + if condition { + err := r.Execute(ctx, tCtx) + if err != nil { + return err + } + } } return nil } @@ -76,6 +97,7 @@ var _ baseContext = &scopeStatements{} type scopeStatements struct { ottl.StatementSequence[ottlscope.TransformContext] + expr.BoolExpr[ottlscope.TransformContext] } func (s scopeStatements) Capabilities() consumer.Capabilities { @@ -90,10 +112,16 @@ func (s scopeStatements) ConsumeTraces(ctx context.Context, td ptrace.Traces) er for j := 0; j < rspans.ScopeSpans().Len(); j++ { sspans := rspans.ScopeSpans().At(j) tCtx := ottlscope.NewTransformContext(sspans.Scope(), rspans.Resource()) - err := s.Execute(ctx, tCtx) + condition, err := s.BoolExpr.Eval(ctx, tCtx) if err != nil { return err } + if condition { + err := s.Execute(ctx, tCtx) + if err != nil { + return err + } + } } } return nil @@ -105,10 +133,16 @@ func (s scopeStatements) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) for j := 0; j < rmetrics.ScopeMetrics().Len(); j++ { smetrics := rmetrics.ScopeMetrics().At(j) tCtx := ottlscope.NewTransformContext(smetrics.Scope(), rmetrics.Resource()) - err := s.Execute(ctx, tCtx) + condition, err := s.BoolExpr.Eval(ctx, tCtx) if err != nil { return err } + if condition { + err := s.Execute(ctx, tCtx) + if err != nil { + return err + } + } } } return nil @@ -120,10 +154,16 @@ func (s scopeStatements) ConsumeLogs(ctx context.Context, ld plog.Logs) error { for j := 0; j < rlogs.ScopeLogs().Len(); j++ { slogs := rlogs.ScopeLogs().At(j) tCtx := ottlscope.NewTransformContext(slogs.Scope(), rlogs.Resource()) - err := s.Execute(ctx, tCtx) + condition, err := s.BoolExpr.Eval(ctx, tCtx) if err != nil { return err } + if condition { + err := s.Execute(ctx, tCtx) + if err != nil { + return err + } + } } } return nil @@ -149,16 +189,37 @@ func (pc parserCollection) parseCommonContextStatements(contextStatement Context if err != nil { return nil, err } + globalExpr, errGlobalBoolExpr := 
parseGlobalExpr(filterottl.NewBoolExprForResource, contextStatement.Conditions, pc, filterottl.StandardResourceFuncs()) + if errGlobalBoolExpr != nil { + return nil, errGlobalBoolExpr + } rStatements := ottlresource.NewStatementSequence(parsedStatements, pc.settings, ottlresource.WithStatementSequenceErrorMode(pc.errorMode)) - return resourceStatements{rStatements}, nil + return resourceStatements{rStatements, globalExpr}, nil case Scope: parsedStatements, err := pc.scopeParser.ParseStatements(contextStatement.Statements) if err != nil { return nil, err } + globalExpr, errGlobalBoolExpr := parseGlobalExpr(filterottl.NewBoolExprForScope, contextStatement.Conditions, pc, filterottl.StandardScopeFuncs()) + if errGlobalBoolExpr != nil { + return nil, errGlobalBoolExpr + } sStatements := ottlscope.NewStatementSequence(parsedStatements, pc.settings, ottlscope.WithStatementSequenceErrorMode(pc.errorMode)) - return scopeStatements{sStatements}, nil + return scopeStatements{sStatements, globalExpr}, nil default: return nil, fmt.Errorf("unknown context %v", contextStatement.Context) } } + +func parseGlobalExpr[K any]( + boolExprFunc func([]string, map[string]ottl.Factory[K], ottl.ErrorMode, component.TelemetrySettings) (expr.BoolExpr[K], error), + conditions []string, + pc parserCollection, + standardFuncs map[string]ottl.Factory[K]) (expr.BoolExpr[K], error) { + + if len(conditions) > 0 { + return boolExprFunc(conditions, standardFuncs, pc.errorMode, pc.settings) + } + // By default, set the global expression to always true unless conditions are specified. + return expr.AlwaysTrue[K](), nil +} diff --git a/processor/transformprocessor/internal/common/traces.go b/processor/transformprocessor/internal/common/traces.go index fe4c59709770..517b9e80969b 100644 --- a/processor/transformprocessor/internal/common/traces.go +++ b/processor/transformprocessor/internal/common/traces.go @@ -10,6 +10,8 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/pdata/ptrace" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope" @@ -21,6 +23,7 @@ var _ consumer.Traces = &traceStatements{} type traceStatements struct { ottl.StatementSequence[ottlspan.TransformContext] + expr.BoolExpr[ottlspan.TransformContext] } func (t traceStatements) Capabilities() consumer.Capabilities { @@ -37,10 +40,16 @@ func (t traceStatements) ConsumeTraces(ctx context.Context, td ptrace.Traces) er spans := sspans.Spans() for k := 0; k < spans.Len(); k++ { tCtx := ottlspan.NewTransformContext(spans.At(k), sspans.Scope(), rspans.Resource()) - err := t.Execute(ctx, tCtx) + condition, err := t.BoolExpr.Eval(ctx, tCtx) if err != nil { return err } + if condition { + err := t.Execute(ctx, tCtx) + if err != nil { + return err + } + } } } } @@ -51,6 +60,7 @@ var _ consumer.Traces = &spanEventStatements{} type spanEventStatements struct { ottl.StatementSequence[ottlspanevent.TransformContext] + expr.BoolExpr[ottlspanevent.TransformContext] } func (s spanEventStatements) Capabilities() consumer.Capabilities { @@ -70,10 +80,16 @@ func (s spanEventStatements) ConsumeTraces(ctx context.Context, td ptrace.Traces spanEvents := span.Events() for n := 0; n < 
spanEvents.Len(); n++ { tCtx := ottlspanevent.NewTransformContext(spanEvents.At(n), span, sspans.Scope(), rspans.Resource()) - err := s.Execute(ctx, tCtx) + condition, err := s.BoolExpr.Eval(ctx, tCtx) if err != nil { return err } + if condition { + err := s.Execute(ctx, tCtx) + if err != nil { + return err + } + } } } } @@ -152,15 +168,23 @@ func (pc TraceParserCollection) ParseContextStatements(contextStatements Context if err != nil { return nil, err } + globalExpr, errGlobalBoolExpr := parseGlobalExpr(filterottl.NewBoolExprForSpan, contextStatements.Conditions, pc.parserCollection, filterottl.StandardSpanFuncs()) + if errGlobalBoolExpr != nil { + return nil, errGlobalBoolExpr + } sStatements := ottlspan.NewStatementSequence(parsedStatements, pc.settings, ottlspan.WithStatementSequenceErrorMode(pc.errorMode)) - return traceStatements{sStatements}, nil + return traceStatements{sStatements, globalExpr}, nil case SpanEvent: parsedStatements, err := pc.spanEventParser.ParseStatements(contextStatements.Statements) if err != nil { return nil, err } + globalExpr, errGlobalBoolExpr := parseGlobalExpr(filterottl.NewBoolExprForSpanEvent, contextStatements.Conditions, pc.parserCollection, filterottl.StandardSpanEventFuncs()) + if errGlobalBoolExpr != nil { + return nil, errGlobalBoolExpr + } seStatements := ottlspanevent.NewStatementSequence(parsedStatements, pc.settings, ottlspanevent.WithStatementSequenceErrorMode(pc.errorMode)) - return spanEventStatements{seStatements}, nil + return spanEventStatements{seStatements, globalExpr}, nil default: return pc.parseCommonContextStatements(contextStatements) } diff --git a/processor/transformprocessor/testdata/config.yaml b/processor/transformprocessor/testdata/config.yaml index 81a097e1098c..8cf295298e54 100644 --- a/processor/transformprocessor/testdata/config.yaml +++ b/processor/transformprocessor/testdata/config.yaml @@ -24,6 +24,26 @@ transform: statements: - set(attributes["name"], "bear") +transform/with_conditions: + trace_statements: + - context: span + conditions: + - attributes["http.path"] == "/animal" + statements: + - set(name, "bear") + metric_statements: + - context: datapoint + conditions: + - attributes["http.path"] == "/animal" + statements: + - set(metric.name, "bear") + log_statements: + - context: log + conditions: + - attributes["http.path"] == "/animal" + statements: + - set(body, "bear") + transform/ignore_errors: error_mode: ignore trace_statements: From 6015403943ced9d3014c2249cbb41906e9a3fb32 Mon Sep 17 00:00:00 2001 From: Ankit Patel <8731662+ankitpatel96@users.noreply.github.com> Date: Fri, 10 May 2024 00:57:05 -0400 Subject: [PATCH 17/55] Instantiate ID in pkg/stanza/adapter tests (#32966) **Description:** In https://github.com/open-telemetry/opentelemetry-collector/pull/10069, I am making Type an interface. This means the zero value of Type will be nil - which will cause this test to fail. 
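A minimal sketch of the adjusted test setup (this mirrors the one-line diff below; `rcvr` is the receiver package alias already used by the test, and the explicit `component.MustNewID` call is the only change):

```go
params := rcvr.CreateSettings{
	// Construct the ID explicitly so its Type is non-nil once component.Type
	// becomes an interface; a zero-value ID would otherwise carry a nil Type.
	ID:                component.MustNewID("testreceiver"),
	TelemetrySettings: componenttest.NewNopTelemetrySettings(),
}
```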
Initializing ID instead of relying on the zero value fixes this --------- Co-authored-by: Pablo Baeyens Co-authored-by: Bogdan Drutu --- pkg/stanza/adapter/storage_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/stanza/adapter/storage_test.go b/pkg/stanza/adapter/storage_test.go index 4a3d2faf1267..d24b583a40e5 100644 --- a/pkg/stanza/adapter/storage_test.go +++ b/pkg/stanza/adapter/storage_test.go @@ -107,6 +107,7 @@ func TestFailOnNonStorageExtension(t *testing.T) { func createReceiver(t *testing.T, storageID component.ID) *receiver { params := rcvr.CreateSettings{ + ID: component.MustNewID("testreceiver"), TelemetrySettings: componenttest.NewNopTelemetrySettings(), } From fba86583b486fd40f190f3c994b8d9a84c81723c Mon Sep 17 00:00:00 2001 From: Huy Vo Date: Fri, 10 May 2024 04:34:09 -0700 Subject: [PATCH 18/55] Add connector usage to the testbed (#32881) **Description:** Added a new component to the testbed called DataConnectors which allows connectors to be added to the testbed config. Also, added a sample connector correctness test using the routingconnector as an example of the usage. **Link to tracking Issue:** #30165 **Testing:** Sample correctness test using routingconnector. **Documentation:** Will update the testbed README with new addition. --------- Co-authored-by: Bryan Aguilar --- .chloggen/connector-testbed.yaml | 27 ++++++ .../integrationtest/go.mod | 15 +++ .../integrationtest/go.sum | 18 ++++ testbed/Makefile | 4 + .../connectors/correctness_test.go | 97 +++++++++++++++++++ .../metrics/correctness_test_case.go | 2 +- .../traces/correctness_test.go | 4 +- testbed/correctnesstests/utils.go | 90 +++++++++++++++-- testbed/dataconnectors/routing.go | 36 +++++++ testbed/dataconnectors/spanmetrics.go | 33 +++++++ testbed/go.mod | 15 ++- testbed/go.sum | 14 +++ testbed/testbed/components.go | 10 ++ testbed/testbed/connectors.go | 22 +++++ 14 files changed, 375 insertions(+), 12 deletions(-) create mode 100644 .chloggen/connector-testbed.yaml create mode 100644 testbed/correctnesstests/connectors/correctness_test.go create mode 100644 testbed/dataconnectors/routing.go create mode 100644 testbed/dataconnectors/spanmetrics.go create mode 100644 testbed/testbed/connectors.go diff --git a/.chloggen/connector-testbed.yaml b/.chloggen/connector-testbed.yaml new file mode 100644 index 000000000000..750199c27a44 --- /dev/null +++ b/.chloggen/connector-testbed.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: testbed + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add the use of connectors to the testbed + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [30165] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. 
'[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [api] \ No newline at end of file diff --git a/exporter/elasticsearchexporter/integrationtest/go.mod b/exporter/elasticsearchexporter/integrationtest/go.mod index 230b4d99e08b..adb9db5ec9a0 100644 --- a/exporter/elasticsearchexporter/integrationtest/go.mod +++ b/exporter/elasticsearchexporter/integrationtest/go.mod @@ -28,6 +28,7 @@ require ( ) require ( + github.com/alecthomas/participle/v2 v2.1.1 // indirect github.com/apache/thrift v0.20.0 // indirect github.com/armon/go-radix v1.0.0 // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -48,6 +49,7 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect + github.com/gobwas/glob v0.2.3 // indirect github.com/gogo/googleapis v1.4.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect @@ -56,6 +58,8 @@ require ( github.com/google/uuid v1.6.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 // indirect github.com/hashicorp/go-version v1.6.0 // indirect + github.com/hashicorp/golang-lru v1.0.2 // indirect + github.com/iancoleman/strcase v0.3.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/influxdata/go-syslog/v3 v3.0.1-0.20230911200830-875f5bc594a4 // indirect github.com/jaegertracing/jaeger v1.57.0 // indirect @@ -67,18 +71,22 @@ require ( github.com/knadh/koanf/v2 v2.1.1 // indirect github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 // indirect github.com/lestrrat-go/strftime v1.0.6 // indirect + github.com/lightstep/go-expohisto v1.0.0 // indirect github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mostynb/go-grpc-compression v1.2.2 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/connector/routingconnector v0.100.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.100.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/exporter/opencensusexporter v0.100.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/exporter/syslogexporter v0.100.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.100.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.100.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.100.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.100.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.100.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.100.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.100.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.100.0 // indirect @@ -102,6 +110,7 @@ require ( github.com/soheilhy/cmux v0.1.5 // indirect github.com/spf13/cobra v1.8.0 // indirect 
github.com/spf13/pflag v1.0.5 // indirect + github.com/tilinna/clock v1.1.0 // indirect github.com/tklauser/go-sysconf v0.3.13 // indirect github.com/tklauser/numcpus v0.7.0 // indirect github.com/valyala/fastjson v1.6.4 // indirect @@ -257,3 +266,9 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourceto replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter => ../../prometheusremotewriteexporter replace github.com/open-telemetry/opentelemetry-collector-contrib/extension/ackextension => ../../../extension/ackextension + +replace github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector => ../../../connector/spanmetricsconnector + +replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl => ../../../pkg/ottl + +replace github.com/open-telemetry/opentelemetry-collector-contrib/connector/routingconnector => ../../../connector/routingconnector diff --git a/exporter/elasticsearchexporter/integrationtest/go.sum b/exporter/elasticsearchexporter/integrationtest/go.sum index 163a3b6c180f..4767d99538c6 100644 --- a/exporter/elasticsearchexporter/integrationtest/go.sum +++ b/exporter/elasticsearchexporter/integrationtest/go.sum @@ -4,6 +4,12 @@ github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/alecthomas/assert/v2 v2.3.0 h1:mAsH2wmvjsuvyBvAmCtm7zFsBlb8mIHx5ySLVdDZXL0= +github.com/alecthomas/assert/v2 v2.3.0/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ= +github.com/alecthomas/participle/v2 v2.1.1 h1:hrjKESvSqGHzRb4yW1ciisFJ4p3MGYih6icjJvbsmV8= +github.com/alecthomas/participle/v2 v2.1.1/go.mod h1:Y1+hAs8DHPmc3YUFzqllV+eSQ9ljPTk0ZkPMtEdAx2c= +github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk= +github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= github.com/apache/thrift v0.20.0 h1:631+KvYbsBZxmuJjYwhezVsrfc/TbqtZV4QcxOX1fOI= github.com/apache/thrift v0.20.0/go.mod h1:hOk1BQqcp2OLzGsyVXdfMk7YFlMxK3aoEVhjD06QhB8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= @@ -60,6 +66,8 @@ github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= @@ -106,8 +114,14 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0Q github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= 
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= +github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= +github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= +github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/influxdata/go-syslog/v3 v3.0.1-0.20230911200830-875f5bc594a4 h1:2r2WiFeAwiJ/uyx1qIKnV1L4C9w/2V8ehlbJY4gjFaM= @@ -139,6 +153,8 @@ github.com/lestrrat-go/envload v0.0.0-20180220234015-a3eb8ddeffcc h1:RKf14vYWi2t github.com/lestrrat-go/envload v0.0.0-20180220234015-a3eb8ddeffcc/go.mod h1:kopuH9ugFRkIXf3YoqHKyrJ9YfUFsckUU9S7B+XP+is= github.com/lestrrat-go/strftime v1.0.6 h1:CFGsDEt1pOpFNU+TJB0nhz9jl+K0hZSLE205AhTIGQQ= github.com/lestrrat-go/strftime v1.0.6/go.mod h1:f7jQKgV5nnJpYgdEasS+/y7EsTb8ykN2z68n3TtcTaw= +github.com/lightstep/go-expohisto v1.0.0 h1:UPtTS1rGdtehbbAF7o/dhkWLTDI73UifG8LbfQI7cA4= +github.com/lightstep/go-expohisto v1.0.0/go.mod h1:xDXD0++Mu2FOaItXtdDfksfgxfV0z1TMPa+e/EUd0cs= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= @@ -225,6 +241,8 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/tilinna/clock v1.1.0 h1:6IQQQCo6KoBxVudv6gwtY8o4eDfhHo8ojA5dP0MfhSs= +github.com/tilinna/clock v1.1.0/go.mod h1:ZsP7BcY7sEEz7ktc0IVy8Us6boDrK8VradlKRUGfOao= github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/go-sysconf v0.3.13 h1:GBUpcahXSpR2xN01jhkNAbTLRk2Yzgggk8IM08lq3r4= github.com/tklauser/go-sysconf v0.3.13/go.mod h1:zwleP4Q4OehZHGn4CYZDipCgg9usW5IJePewFCGVEa0= diff --git a/testbed/Makefile b/testbed/Makefile index 71a5d3eca1b5..ad0f7ba8097f 100644 --- a/testbed/Makefile +++ b/testbed/Makefile @@ -35,3 +35,7 @@ list-correctness-traces-tests: .PHONY: run-correctness-traces-tests run-correctness-traces-tests: $(GOJUNIT) TESTS_DIR=correctnesstests/traces GOJUNIT=$(GOJUNIT) ./runtests.sh + +.PHONY: run-correctness-connectors-tests +run-correctness-connectors-tests: $(GOJUNIT) + TESTS_DIR=correctnesstests/connectors GOJUNIT=$(GOJUNIT) ./runtests.sh diff --git a/testbed/correctnesstests/connectors/correctness_test.go b/testbed/correctnesstests/connectors/correctness_test.go new file mode 100644 index 000000000000..90df960f1a11 --- /dev/null +++ 
b/testbed/correctnesstests/connectors/correctness_test.go @@ -0,0 +1,97 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package connectors + +import ( + "log" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/open-telemetry/opentelemetry-collector-contrib/testbed/correctnesstests" + "github.com/open-telemetry/opentelemetry-collector-contrib/testbed/testbed" +) + +var correctnessResults testbed.TestResultsSummary = &testbed.CorrectnessResults{} + +func TestMain(m *testing.M) { + testbed.DoTestMain(m, correctnessResults) +} + +func TestGoldenData(t *testing.T) { + processors := map[string]string{ + "batch": ` + batch: + send_batch_size: 1024 +`, + } + sampleTest := correctnesstests.PipelineDef{ + TestName: "test routing", + Receiver: "otlp", + Exporter: "otlp", + Connector: "routing", + } + + sampleTest.DataSender = correctnesstests.ConstructTraceSender(t, sampleTest.Receiver) + sampleTest.DataReceiver = correctnesstests.ConstructReceiver(t, sampleTest.Exporter) + sampleTest.DataConnector = correctnesstests.ConstructConnector(t, sampleTest.Connector, "traces") + t.Run(sampleTest.TestName, func(t *testing.T) { + testWithGoldenDataset(t, sampleTest.DataSender, sampleTest.DataReceiver, sampleTest.ResourceSpec, sampleTest.DataConnector, processors) + }) + +} + +func testWithGoldenDataset( + t *testing.T, + sender testbed.DataSender, + receiver testbed.DataReceiver, + resourceSpec testbed.ResourceSpec, + connector testbed.DataConnector, + processors map[string]string, +) { + dataProvider := testbed.NewGoldenDataProvider( + "../../../internal/coreinternal/goldendataset/testdata/generated_pict_pairs_traces.txt", + "../../../internal/coreinternal/goldendataset/testdata/generated_pict_pairs_spans.txt", + "") + factories, err := testbed.Components() + require.NoError(t, err, "default components resulted in: %v", err) + runner := testbed.NewInProcessCollector(factories) + validator := testbed.NewCorrectTestValidator(sender.ProtocolName(), receiver.ProtocolName(), dataProvider) + config := correctnesstests.CreateConfigYaml(t, sender, receiver, connector, processors) + log.Println(config) + configCleanup, cfgErr := runner.PrepareConfig(config) + require.NoError(t, cfgErr, "collector configuration resulted in: %v", cfgErr) + defer configCleanup() + tc := testbed.NewTestCase( + t, + dataProvider, + sender, + receiver, + runner, + validator, + correctnessResults, + testbed.WithResourceLimits(resourceSpec), + ) + defer tc.Stop() + + tc.EnableRecording() + tc.StartBackend() + tc.StartAgent() + + tc.StartLoad(testbed.LoadOptions{ + DataItemsPerSecond: 1024, + ItemsPerBatch: 1, + }) + + tc.Sleep(2 * time.Second) + + tc.StopLoad() + + tc.WaitForN(func() bool { return tc.LoadGenerator.DataItemsSent() == tc.MockBackend.DataItemsReceived() }, + 3*time.Second, "all data items received") + + tc.StopAgent() + +} diff --git a/testbed/correctnesstests/metrics/correctness_test_case.go b/testbed/correctnesstests/metrics/correctness_test_case.go index 58765418e523..1129075f625c 100644 --- a/testbed/correctnesstests/metrics/correctness_test_case.go +++ b/testbed/correctnesstests/metrics/correctness_test_case.go @@ -34,7 +34,7 @@ func newCorrectnessTestCase( func (tc *correctnessTestCase) startCollector() { tc.collector = testbed.NewInProcessCollector(componentFactories(tc.t)) - _, err := tc.collector.PrepareConfig(correctnesstests.CreateConfigYaml(tc.t, tc.sender, tc.receiver, nil, "metrics")) + _, err := 
tc.collector.PrepareConfig(correctnesstests.CreateConfigYaml(tc.t, tc.sender, tc.receiver, nil, nil)) require.NoError(tc.t, err) rd, err := newResultsDir(tc.t.Name()) require.NoError(tc.t, err) diff --git a/testbed/correctnesstests/traces/correctness_test.go b/testbed/correctnesstests/traces/correctness_test.go index 6dd5d1486235..0253996602f0 100644 --- a/testbed/correctnesstests/traces/correctness_test.go +++ b/testbed/correctnesstests/traces/correctness_test.go @@ -56,7 +56,7 @@ func testWithTracingGoldenDataset( require.NoError(t, err, "default components resulted in: %v", err) runner := testbed.NewInProcessCollector(factories) validator := testbed.NewCorrectTestValidator(sender.ProtocolName(), receiver.ProtocolName(), dataProvider) - config := correctnesstests.CreateConfigYaml(t, sender, receiver, processors, "traces") + config := correctnesstests.CreateConfigYaml(t, sender, receiver, nil, processors) log.Println(config) configCleanup, cfgErr := runner.PrepareConfig(config) require.NoError(t, cfgErr, "collector configuration resulted in: %v", cfgErr) @@ -124,7 +124,7 @@ func TestSporadicGoldenDataset(t *testing.T) { sending_queue: enabled: false `) - _, err = runner.PrepareConfig(correctnesstests.CreateConfigYaml(t, sender, receiver, nil, "traces")) + _, err = runner.PrepareConfig(correctnesstests.CreateConfigYaml(t, sender, receiver, nil, nil)) require.NoError(t, err, "collector configuration resulted in: %v", err) validator := testbed.NewCorrectTestValidator(sender.ProtocolName(), receiver.ProtocolName(), dataProvider) tc := testbed.NewTestCase( diff --git a/testbed/correctnesstests/utils.go b/testbed/correctnesstests/utils.go index f84f803b1017..fe96c72d303d 100644 --- a/testbed/correctnesstests/utils.go +++ b/testbed/correctnesstests/utils.go @@ -12,6 +12,7 @@ import ( "testing" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/testutil" + "github.com/open-telemetry/opentelemetry-collector-contrib/testbed/dataconnectors" "github.com/open-telemetry/opentelemetry-collector-contrib/testbed/datareceivers" "github.com/open-telemetry/opentelemetry-collector-contrib/testbed/datasenders" "github.com/open-telemetry/opentelemetry-collector-contrib/testbed/testbed" @@ -24,8 +25,8 @@ func CreateConfigYaml( t testing.TB, sender testbed.DataSender, receiver testbed.DataReceiver, + connector testbed.DataConnector, processors map[string]string, - pipelineType string, ) string { // Prepare extra processor config section and comma-separated list of extra processor @@ -44,6 +45,64 @@ func CreateConfigYaml( } } + var pipeline1 string + switch sender.(type) { + case testbed.TraceDataSender: + pipeline1 = "traces" + case testbed.MetricDataSender: + pipeline1 = "metrics" + case testbed.LogDataSender: + pipeline1 = "logs" + default: + t.Error("Invalid DataSender type") + } + + if connector != nil { + pipeline2 := connector.GetReceiverType() + + format := ` +receivers:%v +exporters:%v +processors: + %s + +extensions: + +connectors:%v + +service: + telemetry: + metrics: + address: 127.0.0.1:%d + logs: + level: "debug" + extensions: + pipelines: + %s/in: + receivers: [%v] + processors: [%s] + exporters: [%v] + %s/out: + receivers: [%v] + exporters: [%v] +` + return fmt.Sprintf( + format, + sender.GenConfigYAMLStr(), + receiver.GenConfigYAMLStr(), + processorsSections, + connector.GenConfigYAMLStr(), + testutil.GetAvailablePort(t), + pipeline1, + sender.ProtocolName(), + processorsList, + connector.ProtocolName(), + pipeline2, + connector.ProtocolName(), + receiver.ProtocolName(), + 
) + } + format := ` receivers:%v exporters:%v @@ -70,7 +129,7 @@ service: receiver.GenConfigYAMLStr(), processorsSections, testutil.GetAvailablePort(t), - pipelineType, + pipeline1, sender.ProtocolName(), processorsList, receiver.ProtocolName(), @@ -79,12 +138,14 @@ service: // PipelineDef holds the information necessary to run a single testbed configuration. type PipelineDef struct { - Receiver string - Exporter string - TestName string - DataSender testbed.DataSender - DataReceiver testbed.DataReceiver - ResourceSpec testbed.ResourceSpec + Receiver string + Exporter string + Connector string + TestName string + DataSender testbed.DataSender + DataReceiver testbed.DataReceiver + DataConnector testbed.DataConnector + ResourceSpec testbed.ResourceSpec } // LoadPictOutputPipelineDefs generates a slice of PipelineDefs from the passed-in generated PICT file. The @@ -168,3 +229,16 @@ func ConstructReceiver(t *testing.T, exporter string) testbed.DataReceiver { } return receiver } + +func ConstructConnector(t *testing.T, connector string, receiverType string) testbed.DataConnector { + var dataconnector testbed.DataConnector + switch connector { + case "spanmetrics": + dataconnector = dataconnectors.NewSpanMetricDataConnector(receiverType) + case "routing": + dataconnector = dataconnectors.NewRoutingDataConnector(receiverType) + default: + t.Errorf("unknown connector type: %s", connector) + } + return dataconnector +} diff --git a/testbed/dataconnectors/routing.go b/testbed/dataconnectors/routing.go new file mode 100644 index 000000000000..d038bc8581d4 --- /dev/null +++ b/testbed/dataconnectors/routing.go @@ -0,0 +1,36 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package dataconnectors // import "github.com/open-telemetry/opentelemetry-collector-contrib/testbed/dataconnectors" + +import ( + "github.com/open-telemetry/opentelemetry-collector-contrib/testbed/testbed" +) + +type RoutingDataConnector struct { + testbed.DataConnectorBase +} + +var _ testbed.DataConnector = (*RoutingDataConnector)(nil) + +func NewRoutingDataConnector(receiverDataType string) *RoutingDataConnector { + return &RoutingDataConnector{DataConnectorBase: testbed.DataConnectorBase{ReceiverDataType: receiverDataType}} +} + +func (rc *RoutingDataConnector) GenConfigYAMLStr() string { + // Note that this generates an exporter config for agent. + return ` + routing: + table: + - statement: route() + pipelines: [traces/out]` +} + +// ProtocolName returns protocol name as it is specified in Collector config. 
+func (rc *RoutingDataConnector) ProtocolName() string { + return "routing" +} + +func (rc *RoutingDataConnector) GetReceiverType() string { + return rc.ReceiverDataType +} diff --git a/testbed/dataconnectors/spanmetrics.go b/testbed/dataconnectors/spanmetrics.go new file mode 100644 index 000000000000..65914c75a062 --- /dev/null +++ b/testbed/dataconnectors/spanmetrics.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package dataconnectors // import "github.com/open-telemetry/opentelemetry-collector-contrib/testbed/dataconnectors" + +import ( + "github.com/open-telemetry/opentelemetry-collector-contrib/testbed/testbed" +) + +type SpanMetricDataConnector struct { + testbed.DataConnectorBase +} + +var _ testbed.DataConnector = (*SpanMetricDataConnector)(nil) + +func NewSpanMetricDataConnector(receiverDataType string) *SpanMetricDataConnector { + return &SpanMetricDataConnector{DataConnectorBase: testbed.DataConnectorBase{ReceiverDataType: receiverDataType}} +} + +func (smc *SpanMetricDataConnector) GenConfigYAMLStr() string { + // Note that this generates an exporter config for agent. + return ` + spanmetrics:` +} + +// ProtocolName returns protocol name as it is specified in Collector config. +func (smc *SpanMetricDataConnector) ProtocolName() string { + return "spanmetrics" +} + +func (smc *SpanMetricDataConnector) GetReceiverType() string { + return smc.ReceiverDataType +} diff --git a/testbed/go.mod b/testbed/go.mod index b44e3a49a7d5..5a829f20b28a 100644 --- a/testbed/go.mod +++ b/testbed/go.mod @@ -5,6 +5,8 @@ go 1.21.0 require ( github.com/fluent/fluent-logger-golang v1.9.0 github.com/jaegertracing/jaeger v1.57.0 + github.com/open-telemetry/opentelemetry-collector-contrib/connector/routingconnector v0.100.0 + github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.100.0 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/carbonexporter v0.100.0 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/opencensusexporter v0.100.0 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.100.0 @@ -42,6 +44,7 @@ require ( go.opentelemetry.io/collector/config/configtls v0.100.0 go.opentelemetry.io/collector/confmap v0.100.0 go.opentelemetry.io/collector/confmap/provider/fileprovider v0.100.0 + go.opentelemetry.io/collector/connector v0.100.0 go.opentelemetry.io/collector/consumer v0.100.0 go.opentelemetry.io/collector/exporter v0.100.0 go.opentelemetry.io/collector/exporter/debugexporter v0.100.0 @@ -78,6 +81,7 @@ require ( github.com/DataDog/datadog-agent/pkg/proto v0.54.0-rc.2 // indirect github.com/DataDog/datadog-agent/pkg/trace/exportable v0.0.0-20201016145401-4646cf596b02 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/alecthomas/participle/v2 v2.1.1 // indirect github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect github.com/apache/thrift v0.20.0 // indirect github.com/armon/go-metrics v0.4.1 // indirect @@ -148,6 +152,7 @@ require ( github.com/hashicorp/nomad/api v0.0.0-20240306004928-3e7191ccb702 // indirect github.com/hashicorp/serf v0.10.1 // indirect github.com/hetznercloud/hcloud-go/v2 v2.6.0 // indirect + github.com/iancoleman/strcase v0.3.0 // indirect github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/influxdata/go-syslog/v3 v3.0.1-0.20230911200830-875f5bc594a4 // indirect @@ -163,6 +168,7 @@ require ( 
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 // indirect + github.com/lightstep/go-expohisto v1.0.0 // indirect github.com/linode/linodego v1.33.0 // indirect github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect github.com/mailru/easyjson v0.7.7 // indirect @@ -183,6 +189,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.100.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.100.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.100.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.100.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.100.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.100.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.100.0 // indirect @@ -210,6 +217,7 @@ require ( github.com/soheilhy/cmux v0.1.5 // indirect github.com/spf13/cobra v1.8.0 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/tilinna/clock v1.1.0 // indirect github.com/tinylib/msgp v1.1.9 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect @@ -227,7 +235,6 @@ require ( go.opentelemetry.io/collector/confmap/provider/httpprovider v0.100.0 // indirect go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.100.0 // indirect go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.100.0 // indirect - go.opentelemetry.io/collector/connector v0.100.0 // indirect go.opentelemetry.io/collector/extension/auth v0.100.0 // indirect go.opentelemetry.io/collector/featuregate v1.7.0 // indirect go.opentelemetry.io/collector/service v0.100.0 // indirect @@ -283,6 +290,10 @@ require ( sigs.k8s.io/yaml v1.3.0 // indirect ) +replace github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector => ../connector/spanmetricsconnector + +replace github.com/open-telemetry/opentelemetry-collector-contrib/connector/routingconnector => ../connector/routingconnector + replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/carbonexporter => ../exporter/carbonexporter replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/opencensusexporter => ../exporter/opencensusexporter @@ -366,3 +377,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden => ../pkg/golden replace github.com/open-telemetry/opentelemetry-collector-contrib/extension/ackextension => ../extension/ackextension + +replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl => ../pkg/ottl diff --git a/testbed/go.sum b/testbed/go.sum index ac5e8a06b31a..31eb669a8bc6 100644 --- a/testbed/go.sum +++ b/testbed/go.sum @@ -74,6 +74,12 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= 
+github.com/alecthomas/assert/v2 v2.3.0 h1:mAsH2wmvjsuvyBvAmCtm7zFsBlb8mIHx5ySLVdDZXL0= +github.com/alecthomas/assert/v2 v2.3.0/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ= +github.com/alecthomas/participle/v2 v2.1.1 h1:hrjKESvSqGHzRb4yW1ciisFJ4p3MGYih6icjJvbsmV8= +github.com/alecthomas/participle/v2 v2.1.1/go.mod h1:Y1+hAs8DHPmc3YUFzqllV+eSQ9ljPTk0ZkPMtEdAx2c= +github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk= +github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -368,6 +374,10 @@ github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= github.com/hetznercloud/hcloud-go/v2 v2.6.0 h1:RJOA2hHZ7rD1pScA4O1NF6qhkHyUdbbxjHgFNot8928= github.com/hetznercloud/hcloud-go/v2 v2.6.0/go.mod h1:4J1cSE57+g0WS93IiHLV7ubTHItcp+awzeBp5bM9mfA= +github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= +github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= +github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= +github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= @@ -426,6 +436,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 h1:bCiVCRCs1Heq84lurVinUPy19keqGEe4jh5vtK37jcg= github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg= +github.com/lightstep/go-expohisto v1.0.0 h1:UPtTS1rGdtehbbAF7o/dhkWLTDI73UifG8LbfQI7cA4= +github.com/lightstep/go-expohisto v1.0.0/go.mod h1:xDXD0++Mu2FOaItXtdDfksfgxfV0z1TMPa+e/EUd0cs= github.com/linode/linodego v1.33.0 h1:cX2FYry7r6CA1ujBMsdqiM4VhvIQtnWsOuVblzfBhCw= github.com/linode/linodego v1.33.0/go.mod h1:dSJJgIwqZCF5wnpuC6w5cyIbRtcexAm7uVvuJopGB40= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= @@ -634,6 +646,8 @@ github.com/tidwall/tinylru v1.1.0 h1:XY6IUfzVTU9rpwdhKUF6nQdChgCdGjkMfLzbWyiau6I github.com/tidwall/tinylru v1.1.0/go.mod h1:3+bX+TJ2baOLMWTnlyNWHh4QMnFyARg2TLTQ6OFbzw8= github.com/tidwall/wal v1.1.7 h1:emc1TRjIVsdKKSnpwGBAcsAGg0767SvUk8+ygx7Bb+4= github.com/tidwall/wal v1.1.7/go.mod h1:r6lR1j27W9EPalgHiB7zLJDYu3mzW5BQP5KrzBpYY/E= +github.com/tilinna/clock v1.1.0 h1:6IQQQCo6KoBxVudv6gwtY8o4eDfhHo8ojA5dP0MfhSs= +github.com/tilinna/clock v1.1.0/go.mod h1:ZsP7BcY7sEEz7ktc0IVy8Us6boDrK8VradlKRUGfOao= github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.9 h1:SHf3yoO2sGA0veCJeCBYLHuttAVFHGm2RHgNodW7wQU= 
github.com/tinylib/msgp v1.1.9/go.mod h1:BCXGB54lDD8qUEPmiG0cQQUANC4IUQyB2ItS2UDlO/k= diff --git a/testbed/testbed/components.go b/testbed/testbed/components.go index 6574937b8598..cd817d268ec5 100644 --- a/testbed/testbed/components.go +++ b/testbed/testbed/components.go @@ -4,6 +4,7 @@ package testbed // import "github.com/open-telemetry/opentelemetry-collector-contrib/testbed/testbed" import ( + "go.opentelemetry.io/collector/connector" "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/exporter/debugexporter" "go.opentelemetry.io/collector/exporter/otlpexporter" @@ -19,6 +20,8 @@ import ( "go.opentelemetry.io/collector/receiver/otlpreceiver" "go.uber.org/multierr" + "github.com/open-telemetry/opentelemetry-collector-contrib/connector/routingconnector" + "github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/opencensusexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/syslogexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter" @@ -66,11 +69,18 @@ func Components() ( ) errs = multierr.Append(errs, err) + connectors, err := connector.MakeFactoryMap( + spanmetricsconnector.NewFactory(), + routingconnector.NewFactory(), + ) + errs = multierr.Append(errs, err) + factories := otelcol.Factories{ Extensions: extensions, Receivers: receivers, Processors: processors, Exporters: exporters, + Connectors: connectors, } return factories, errs diff --git a/testbed/testbed/connectors.go b/testbed/testbed/connectors.go new file mode 100644 index 000000000000..953d8793f934 --- /dev/null +++ b/testbed/testbed/connectors.go @@ -0,0 +1,22 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package testbed // import "github.com/open-telemetry/opentelemetry-collector-contrib/testbed/testbed" + +type DataConnector interface { + // GenConfigYAMLStr generates a config string to place in receiver part of collector config + // so that it can receive data from this sender. + GenConfigYAMLStr() string + + // ProtocolName returns exporter name to use in collector config pipeline. + ProtocolName() string + + // GetReceiverType returns the data type for the DataReceiver in the second pipeline when using connectors + GetReceiverType() string +} + +// DataReceiverBase implement basic functions needed by all receivers. +type DataConnectorBase struct { + // The data type of the receiver in second pipeline. + ReceiverDataType string +} From 0894a437bda260e5c4770914c94ab674f1172fce Mon Sep 17 00:00:00 2001 From: Curtis Robert Date: Fri, 10 May 2024 04:39:17 -0700 Subject: [PATCH 19/55] [chore][CONTRIBUTING.md] Update adding component directions (#32957) **Description:** This is two main changes: 1. Remove `goleak` section. It's now added by default by mdatagen, there's nothing required of users here. 2. Add information and reformat the `Last steps` section of adding a new component. - Move the directions to be bullet points - Add `make generate` to make sure the component's README is updated properly - Explicitly point out that stability and distribution needs to be updated in `metadata.yaml` - Add step for adding the component to the releases repo. My understanding is that if a component is `Alpha`, it should be included in the release, so I've made that a noted part of the steps here. (Let me know if we should word this in a more "optional" way.) 
--------- Co-authored-by: Pablo Baeyens --- CONTRIBUTING.md | 38 ++++++++++++++------------------------ 1 file changed, 14 insertions(+), 24 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 6643c475eb00..d7f4c79bf23a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -161,24 +161,6 @@ and its contributors. and in the respective testing harnesses. To align with the test goal of the project, components must be testable within the framework defined within the folder. If a component can not be properly tested within the existing framework, it must increase the non testable components number with a comment within the PR explaining as to why it can not be tested. -- Enable [goleak checks](https://github.com/uber-go/goleak) to help ensure your component does not leak goroutines. This - requires adding a file named `package_test.go` to every sub-directory containing tests. This file should have the following contents by default: -``` -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package fooreceiver - -import ( - "testing" - - "go.uber.org/goleak" -) - -func TestMain(m *testing.M) { - goleak.VerifyTestMain(m) -} -``` - Create a `metadata.yaml` file with at minimum the required fields defined in [metadata-schema.yaml](https://github.com/open-telemetry/opentelemetry-collector/blob/main/cmd/mdatagen/metadata-schema.yaml). Here is a minimal representation: @@ -234,12 +216,20 @@ When submitting a component to the community, consider breaking it down into sep * **Second PR** should include the concrete implementation of the component. If the size of this PR is larger than the recommended size consider splitting it in multiple PRs. -* **Last PR** should mark the new component as `Alpha` stability and add it to the `cmd/otelcontribcol` - binary by updating the `cmd/otelcontribcol/builder-config.yaml` file and running `make genotelcontribcol`. - The component's tests must also be added as a part of its respective `component_type_tests.go` file in the `cmd/otelcontribcol` directory. - The component must be enabled only after sufficient testing and only when it meets [`Alpha` stability requirements](https://github.com/open-telemetry/opentelemetry-collector#alpha). -* Once a new component has been added to the executable, please add the component - to the [OpenTelemetry.io registry](https://github.com/open-telemetry/opentelemetry.io#adding-a-project-to-the-opentelemetry-registry). +* **Last PR** should mark the new component as `Alpha` stability. + * Update its `metadata.yaml` file. + * Mark the stability as `alpha` + * Add `contrib` to the list of distributions + * Add it to the `cmd/otelcontribcol` binary by updating the `cmd/otelcontribcol/builder-config.yaml` file. + * Please also run: + - `make generate` + - `make genotelcontribcol` + * The component's tests must also be added as a part of its respective `component_type_tests.go` file in the `cmd/otelcontribcol` directory. + * The component must be enabled only after sufficient testing and only when it meets [`Alpha` stability requirements](https://github.com/open-telemetry/opentelemetry-collector#alpha). +* Once your component has reached `Alpha` stability, you may also submit a PR to the [OpenTelemetry Collector Releases](https://github.com/open-telemetry/opentelemetry-collector-releases) repository to include your component in future releases of the OpenTelemetry Collector `contrib` distribution. 
+* Once a new component has been added to the executable: + * Please add the component + to the [OpenTelemetry.io registry](https://github.com/open-telemetry/opentelemetry.io#adding-a-project-to-the-opentelemetry-registry). ### Releasing New Components After a component has been approved and merged, and has been enabled in `internal/components/`, it must be added to the From cde2b0af181cae8333f1200e8f6750bfb4af7cf2 Mon Sep 17 00:00:00 2001 From: Brandon Johnson Date: Fri, 10 May 2024 08:45:08 -0400 Subject: [PATCH 20/55] [opampextension]: Move custom message interface to separate module (#32951) **Description:** * Breaks our the custom message interface to a separate module, so other components can use the interface without needing to import the `opampextension` module in its entirety. We could temporarily alias the old methods if we'd like, but I think that the CustomMessage stuff has been so short lived that, in addition to the alpha status of the opampextension component, it feels justified to just skip the deprecation process and move it to a new module. **Link to tracking Issue:** Closes #32950 **Testing:** * Covered by existing unit tests **Documentation:** * Added more documentation on usage in the new module. * Modified opampextension docs to point to the new module. --- .chloggen/chore_move-custom-messages.yaml | 29 +++++++ .github/CODEOWNERS | 1 + .github/ISSUE_TEMPLATE/bug_report.yaml | 1 + .github/ISSUE_TEMPLATE/feature_request.yaml | 1 + .github/ISSUE_TEMPLATE/other.yaml | 1 + .github/ISSUE_TEMPLATE/unmaintained.yaml | 1 + cmd/checkapi/allowlist.txt | 3 +- cmd/otelcontribcol/builder-config.yaml | 1 + cmd/otelcontribcol/go.mod | 3 + extension/opampcustommessages/Makefile | 1 + extension/opampcustommessages/README.md | 81 +++++++++++++++++++ .../custom_messages.go | 21 ++--- extension/opampcustommessages/go.mod | 7 ++ extension/opampcustommessages/go.sum | 8 ++ extension/opampcustommessages/metadata.yaml | 3 + extension/opampextension/README.md | 22 +---- extension/opampextension/go.mod | 3 + extension/opampextension/opamp_agent.go | 6 +- extension/opampextension/registry.go | 10 ++- extension/opampextension/registry_test.go | 4 +- versions.yaml | 1 + 21 files changed, 170 insertions(+), 38 deletions(-) create mode 100644 .chloggen/chore_move-custom-messages.yaml create mode 100644 extension/opampcustommessages/Makefile create mode 100644 extension/opampcustommessages/README.md rename extension/{opampextension => opampcustommessages}/custom_messages.go (76%) create mode 100644 extension/opampcustommessages/go.mod create mode 100644 extension/opampcustommessages/go.sum create mode 100644 extension/opampcustommessages/metadata.yaml diff --git a/.chloggen/chore_move-custom-messages.yaml b/.chloggen/chore_move-custom-messages.yaml new file mode 100644 index 000000000000..f3b31c0a6a4d --- /dev/null +++ b/.chloggen/chore_move-custom-messages.yaml @@ -0,0 +1,29 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: breaking + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: opampextension + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Move custom message interfaces to separate package + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. 
+issues: [32950] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: | + Moves `CustomCapabilityRegistry`, `CustomCapabilityHandler`, and `CustomCapabilityRegisterOption` to a new module. + These types can now be found in the new `github.com/open-telemetry/opentelemetry-collector-contrib/extension/opampcustommessages` module. + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [api] diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 4c0c3ac8bec9..d45eac81d56d 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -109,6 +109,7 @@ extension/observer/ecstaskobserver/ @open-telemetry/collect extension/observer/hostobserver/ @open-telemetry/collector-contrib-approvers @MovieStoreGuy extension/observer/k8sobserver/ @open-telemetry/collector-contrib-approvers @rmfitzpatrick @dmitryax extension/oidcauthextension/ @open-telemetry/collector-contrib-approvers @jpkrohling +extension/opampcustommessages/ @open-telemetry/collector-contrib-approvers @BinaryFissionGames @evan-bradley extension/opampextension/ @open-telemetry/collector-contrib-approvers @portertech @evan-bradley @tigrannajaryan extension/pprofextension/ @open-telemetry/collector-contrib-approvers @MovieStoreGuy extension/remotetapextension/ @open-telemetry/collector-contrib-approvers @atoulme diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml index ece9b9b14bba..18ebcf6084c2 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yaml +++ b/.github/ISSUE_TEMPLATE/bug_report.yaml @@ -109,6 +109,7 @@ body: - extension/observer/k8sobserver - extension/oidcauth - extension/opamp + - extension/opampcustommessages - extension/pprof - extension/remotetap - extension/sigv4auth diff --git a/.github/ISSUE_TEMPLATE/feature_request.yaml b/.github/ISSUE_TEMPLATE/feature_request.yaml index e01c8066b397..55d3661cd630 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yaml +++ b/.github/ISSUE_TEMPLATE/feature_request.yaml @@ -103,6 +103,7 @@ body: - extension/observer/k8sobserver - extension/oidcauth - extension/opamp + - extension/opampcustommessages - extension/pprof - extension/remotetap - extension/sigv4auth diff --git a/.github/ISSUE_TEMPLATE/other.yaml b/.github/ISSUE_TEMPLATE/other.yaml index 503aaa5a2cf4..00a97322e50a 100644 --- a/.github/ISSUE_TEMPLATE/other.yaml +++ b/.github/ISSUE_TEMPLATE/other.yaml @@ -103,6 +103,7 @@ body: - extension/observer/k8sobserver - extension/oidcauth - extension/opamp + - extension/opampcustommessages - extension/pprof - extension/remotetap - extension/sigv4auth diff --git a/.github/ISSUE_TEMPLATE/unmaintained.yaml b/.github/ISSUE_TEMPLATE/unmaintained.yaml index 0a3306ead60d..d5074d4c1e9f 100644 --- a/.github/ISSUE_TEMPLATE/unmaintained.yaml +++ b/.github/ISSUE_TEMPLATE/unmaintained.yaml @@ -108,6 +108,7 @@ body: - extension/observer/k8sobserver - extension/oidcauth - extension/opamp + - extension/opampcustommessages - extension/pprof - extension/remotetap - extension/sigv4auth diff --git 
a/cmd/checkapi/allowlist.txt b/cmd/checkapi/allowlist.txt index f2e16644eb81..f34b5728d7d2 100644 --- a/cmd/checkapi/allowlist.txt +++ b/cmd/checkapi/allowlist.txt @@ -1 +1,2 @@ -extension/observer \ No newline at end of file +extension/observer +extension/opampcustommessages diff --git a/cmd/otelcontribcol/builder-config.yaml b/cmd/otelcontribcol/builder-config.yaml index 62dd27ec7191..c8868207d191 100644 --- a/cmd/otelcontribcol/builder-config.yaml +++ b/cmd/otelcontribcol/builder-config.yaml @@ -460,3 +460,4 @@ replaces: - github.com/open-telemetry/opentelemetry-collector-contrib/extension/ackextension => ../../extension/ackextension - github.com/open-telemetry/opentelemetry-collector-contrib/extension/googleclientauthextension => ../../extension/googleclientauthextension - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver => ../../receiver/splunkenterprisereceiver + - github.com/open-telemetry/opentelemetry-collector-contrib/extension/opampcustommessages => ../../extension/opampcustommessages diff --git a/cmd/otelcontribcol/go.mod b/cmd/otelcontribcol/go.mod index 7aba937de96a..084aa0e0dc20 100644 --- a/cmd/otelcontribcol/go.mod +++ b/cmd/otelcontribcol/go.mod @@ -601,6 +601,7 @@ require ( github.com/open-telemetry/opamp-go v0.14.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/extension/encoding v0.100.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer v0.100.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/extension/opampcustommessages v0.0.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil v0.100.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight v0.100.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs v0.100.0 // indirect @@ -1284,3 +1285,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/extension/acke replace github.com/open-telemetry/opentelemetry-collector-contrib/extension/googleclientauthextension => ../../extension/googleclientauthextension replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver => ../../receiver/splunkenterprisereceiver + +replace github.com/open-telemetry/opentelemetry-collector-contrib/extension/opampcustommessages => ../../extension/opampcustommessages diff --git a/extension/opampcustommessages/Makefile b/extension/opampcustommessages/Makefile new file mode 100644 index 000000000000..ded7a36092dc --- /dev/null +++ b/extension/opampcustommessages/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/extension/opampcustommessages/README.md b/extension/opampcustommessages/README.md new file mode 100644 index 000000000000..8c2db9d162a1 --- /dev/null +++ b/extension/opampcustommessages/README.md @@ -0,0 +1,81 @@ +# extension/opampcustommessages + +## Overview + +This modules contains interfaces and shared code for sending and receiving [custom messages](https://github.com/open-telemetry/opamp-spec/blob/main/specification.md#custom-messages) via OpAMP. + +## Usage + +An extension may implement the `opampcustommessages.CustomCapabilityRegistry` interface, which allows other components to register capabilities to send and receive messages to/from an OpAMP server. For an example of a component implementing this interface, see the [OpAMP extension](../opampextension/README.md). 
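+
+The interfaces involved look roughly like the sketch below. This is a non-authoritative sketch inferred from the usage examples later in this README (the package clause and imports are omitted for brevity, and the exact channel element type `*protobufs.CustomMessage` is assumed from the opamp-go `protobufs` package); see this module's Go documentation for the authoritative definitions.
+
+```go
+// CustomCapabilityRegistry registers handlers for OpAMP custom capabilities.
+type CustomCapabilityRegistry interface {
+	// Register adds the given custom capability and returns a handler used to
+	// send and receive messages for that capability.
+	Register(capability string, opts ...CustomCapabilityRegisterOption) (CustomCapabilityHandler, error)
+}
+
+// CustomCapabilityHandler sends and receives messages for one registered capability.
+type CustomCapabilityHandler interface {
+	// Message returns a channel on which received custom messages are delivered.
+	Message() <-chan *protobufs.CustomMessage
+	// SendMessage schedules a custom message to be sent to the OpAMP server.
+	SendMessage(messageType string, message []byte) (chan struct{}, error)
+	// Unregister removes the capability; subsequent SendMessage calls are rejected.
+	Unregister()
+}
+```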
+ + +### Registering a custom capability + +Other components may use a configured OpAMP extension to send and receive custom messages to and from an OpAMP server. Components may use the provided `components.Host` from the Start method in order to get a handle to the registry: + +```go +func Start(_ context.Context, host component.Host) error { + ext, ok := host.GetExtensions()[opampExtensionID] + if !ok { + return fmt.Errorf("extension %q does not exist", opampExtensionID) + } + + registry, ok := ext.(opampcustommessages.CustomCapabilityRegistry) + if !ok { + return fmt.Errorf("extension %q is not a custom message registry", opampExtensionID) + } + + handler, err := registry.Register("io.opentelemetry.custom-capability") + if err != nil { + return fmt.Errorf("failed to register custom capability: %w", err) + } + + // ... send/receive messages using the given handler + + return nil +} +``` + +### Using a CustomCapabilityHandler to send/receive messages + +After obtaining a handler for the custom capability, you can send and receive messages for the custom capability by using the SendMessage and Message methods, respectively: + +#### Sending a message + +To send a message, you can use the SendMessage method. Since only one custom message can be scheduled to send at a time, the error returned should be checked if it's [ErrCustomMessagePending](https://pkg.go.dev/github.com/open-telemetry/opamp-go@v0.14.0/client/types#pkg-variables), and wait on the returned channel to attempt sending the message again. + +```go +for { + sendingChan, err := handler.SendMessage("messageType", []byte("message-data")) + switch { + case err == nil: + break + case errors.Is(err, types.ErrCustomMessagePending): + <-sendingChan + continue + default: + return fmt.Errorf("failed to send message: %w", err) + } +} +``` + +#### Receiving a message + +Messages can be received through the channel returned by the `Message` method on the handler: + +```go +msg := <-handler.Message() +// process the message... +``` + +Components receiving messages should take care not to modify the received message, as the message may be shared between multiple components. + +### Unregistering a capability + +After a component is done processing messages for a given capability, or shuts down, it should unregister its handler. You can do this by calling the `Unregister` method: + +```go +handler.Unregister() +``` + +After a handler has been unregistered, it will no longer receive any messages from the OpAMP server, and any further calls to SendMessage will reject the message and return an error. diff --git a/extension/opampextension/custom_messages.go b/extension/opampcustommessages/custom_messages.go similarity index 76% rename from extension/opampextension/custom_messages.go rename to extension/opampcustommessages/custom_messages.go index b6183964432d..40bf8028d839 100644 --- a/extension/opampextension/custom_messages.go +++ b/extension/opampcustommessages/custom_messages.go @@ -1,29 +1,30 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package opampextension // import "github.com/open-telemetry/opentelemetry-collector-contrib/extension/opampextension" +// Package opampcustommessages contains interfaces and shared code for sending and receiving custom messages via OpAMP. 
+package opampcustommessages // import "github.com/open-telemetry/opentelemetry-collector-contrib/extension/opampcustommessages" import "github.com/open-telemetry/opamp-go/protobufs" -// customCapabilityRegisterOptions represents extra options that can be use in CustomCapabilityRegistry.Register -type customCapabilityRegisterOptions struct { +// CustomCapabilityRegisterOptions represents extra options that can be use in CustomCapabilityRegistry.Register +type CustomCapabilityRegisterOptions struct { MaxQueuedMessages int } -// defaultCustomCapabilityRegisterOptions returns the default options for CustomCapabilityRegisterOptions -func defaultCustomCapabilityRegisterOptions() *customCapabilityRegisterOptions { - return &customCapabilityRegisterOptions{ +// DefaultCustomCapabilityRegisterOptions returns the default options for CustomCapabilityRegisterOptions +func DefaultCustomCapabilityRegisterOptions() *CustomCapabilityRegisterOptions { + return &CustomCapabilityRegisterOptions{ MaxQueuedMessages: 10, } } // CustomCapabilityRegisterOption represent a single option for CustomCapabilityRegistry.Register -type CustomCapabilityRegisterOption func(*customCapabilityRegisterOptions) +type CustomCapabilityRegisterOption func(*CustomCapabilityRegisterOptions) -// withMaxQueuedMessages overrides the maximum number of queued messages. If a message is received while +// WithMaxQueuedMessages overrides the maximum number of queued messages. If a message is received while // MaxQueuedMessages messages are already queued to be processed, the message is dropped. -func withMaxQueuedMessages(maxQueuedMessages int) CustomCapabilityRegisterOption { - return func(c *customCapabilityRegisterOptions) { +func WithMaxQueuedMessages(maxQueuedMessages int) CustomCapabilityRegisterOption { + return func(c *CustomCapabilityRegisterOptions) { c.MaxQueuedMessages = maxQueuedMessages } } diff --git a/extension/opampcustommessages/go.mod b/extension/opampcustommessages/go.mod new file mode 100644 index 000000000000..bee8259867ea --- /dev/null +++ b/extension/opampcustommessages/go.mod @@ -0,0 +1,7 @@ +module github.com/open-telemetry/opentelemetry-collector-contrib/extension/opampcustommessages + +go 1.21.0 + +require github.com/open-telemetry/opamp-go v0.14.0 + +require google.golang.org/protobuf v1.33.0 // indirect diff --git a/extension/opampcustommessages/go.sum b/extension/opampcustommessages/go.sum new file mode 100644 index 000000000000..494d37eede31 --- /dev/null +++ b/extension/opampcustommessages/go.sum @@ -0,0 +1,8 @@ +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/open-telemetry/opamp-go v0.14.0 h1:KoziIK+wsFojhUXNTkCSTnCPf0eCMqFAaccOs0HrWIY= +github.com/open-telemetry/opamp-go v0.14.0/go.mod h1:XOGCigljsLSTZ8FfLwvat0M1QDj3conIIgRa77BWrKs= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= diff --git a/extension/opampcustommessages/metadata.yaml b/extension/opampcustommessages/metadata.yaml new file mode 100644 index 000000000000..500fe1925983 --- /dev/null +++ b/extension/opampcustommessages/metadata.yaml @@ -0,0 +1,3 @@ +status: + codeowners: + 
active: [BinaryFissionGames, evan-bradley] diff --git a/extension/opampextension/README.md b/extension/opampextension/README.md index 1f3beea3479d..daf678858f3a 100644 --- a/extension/opampextension/README.md +++ b/extension/opampextension/README.md @@ -47,27 +47,9 @@ extensions: ## Custom Messages -Other components may use a configured OpAMP extension to send and receive custom messages to and from an OpAMP server. Components may use the provided `components.Host` from the Start method in order to get a handle to the registry: +Other components may use a configured OpAMP extension to send and receive custom messages to and from an OpAMP server. -```go -func Start(_ context.Context, host component.Host) error { - ext, ok := host.GetExtensions()[opampExtensionID] - if !ok { - return fmt.Errorf("opamp extension %q does not exist", opampExtensionID) - } - - registry, ok := ext.(opampextension.CustomCapabilityRegistry) - if !ok { - return fmt.Errorf("extension %q is not a custom message registry", opampExtensionID) - } - - // You can now use registry.Register to register a custom capability - - return nil -} -``` - -See the [custom_messages.go](./custom_messages.go) for more information on the custom message API. +See the [opampcustommessages](../opampcustommessages/README.md) module for more information on the custom message API. ## Status diff --git a/extension/opampextension/go.mod b/extension/opampextension/go.mod index 1ff26a7af209..be32a5906f92 100644 --- a/extension/opampextension/go.mod +++ b/extension/opampextension/go.mod @@ -6,6 +6,7 @@ require ( github.com/google/uuid v1.6.0 github.com/oklog/ulid/v2 v2.1.0 github.com/open-telemetry/opamp-go v0.14.0 + github.com/open-telemetry/opentelemetry-collector-contrib/extension/opampcustommessages v0.0.0 github.com/shirou/gopsutil/v3 v3.24.4 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.100.0 @@ -65,3 +66,5 @@ require ( google.golang.org/grpc v1.63.2 // indirect google.golang.org/protobuf v1.34.0 // indirect ) + +replace github.com/open-telemetry/opentelemetry-collector-contrib/extension/opampcustommessages => ../opampcustommessages diff --git a/extension/opampextension/opamp_agent.go b/extension/opampextension/opamp_agent.go index 4a3c2924ae5e..b495bc62273b 100644 --- a/extension/opampextension/opamp_agent.go +++ b/extension/opampextension/opamp_agent.go @@ -24,6 +24,8 @@ import ( "go.uber.org/zap" "golang.org/x/exp/maps" "gopkg.in/yaml.v3" + + "github.com/open-telemetry/opentelemetry-collector-contrib/extension/opampcustommessages" ) type opampAgent struct { @@ -53,7 +55,7 @@ type opampAgent struct { customCapabilityRegistry *customCapabilityRegistry } -var _ CustomCapabilityRegistry = (*opampAgent)(nil) +var _ opampcustommessages.CustomCapabilityRegistry = (*opampAgent)(nil) func (o *opampAgent) Start(ctx context.Context, _ component.Host) error { header := http.Header{} @@ -141,7 +143,7 @@ func (o *opampAgent) NotifyConfig(ctx context.Context, conf *confmap.Conf) error return nil } -func (o *opampAgent) Register(capability string, opts ...CustomCapabilityRegisterOption) (CustomCapabilityHandler, error) { +func (o *opampAgent) Register(capability string, opts ...opampcustommessages.CustomCapabilityRegisterOption) (opampcustommessages.CustomCapabilityHandler, error) { return o.customCapabilityRegistry.Register(capability, opts...) 
} diff --git a/extension/opampextension/registry.go b/extension/opampextension/registry.go index 4c97c5500c20..22f42d4d9711 100644 --- a/extension/opampextension/registry.go +++ b/extension/opampextension/registry.go @@ -13,6 +13,8 @@ import ( "github.com/open-telemetry/opamp-go/protobufs" "go.uber.org/zap" "golang.org/x/exp/maps" + + "github.com/open-telemetry/opentelemetry-collector-contrib/extension/opampcustommessages" ) // customCapabilityClient is a subset of OpAMP client containing only the methods needed for the customCapabilityRegistry. @@ -28,7 +30,7 @@ type customCapabilityRegistry struct { logger *zap.Logger } -var _ CustomCapabilityRegistry = (*customCapabilityRegistry)(nil) +var _ opampcustommessages.CustomCapabilityRegistry = (*customCapabilityRegistry)(nil) func newCustomCapabilityRegistry(logger *zap.Logger, client customCapabilityClient) *customCapabilityRegistry { return &customCapabilityRegistry{ @@ -40,8 +42,8 @@ func newCustomCapabilityRegistry(logger *zap.Logger, client customCapabilityClie } // Register implements CustomCapabilityRegistry.Register -func (cr *customCapabilityRegistry) Register(capability string, opts ...CustomCapabilityRegisterOption) (CustomCapabilityHandler, error) { - optsStruct := defaultCustomCapabilityRegisterOptions() +func (cr *customCapabilityRegistry) Register(capability string, opts ...opampcustommessages.CustomCapabilityRegisterOption) (opampcustommessages.CustomCapabilityHandler, error) { + optsStruct := opampcustommessages.DefaultCustomCapabilityRegisterOptions() for _, opt := range opts { opt(optsStruct) } @@ -151,7 +153,7 @@ type customMessageHandler struct { unregistered bool } -var _ CustomCapabilityHandler = (*customMessageHandler)(nil) +var _ opampcustommessages.CustomCapabilityHandler = (*customMessageHandler)(nil) func newCustomMessageHandler( registry *customCapabilityRegistry, diff --git a/extension/opampextension/registry_test.go b/extension/opampextension/registry_test.go index 79ed2bf23332..2e395d50e24d 100644 --- a/extension/opampextension/registry_test.go +++ b/extension/opampextension/registry_test.go @@ -11,6 +11,8 @@ import ( "github.com/open-telemetry/opamp-go/protobufs" "github.com/stretchr/testify/require" "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/extension/opampcustommessages" ) func TestRegistry_Register(t *testing.T) { @@ -92,7 +94,7 @@ func TestRegistry_ProcessMessage(t *testing.T) { registry := newCustomCapabilityRegistry(zap.NewNop(), client) - sender, err := registry.Register(capabilityString, withMaxQueuedMessages(0)) + sender, err := registry.Register(capabilityString, opampcustommessages.WithMaxQueuedMessages(0)) require.NotNil(t, sender) require.NoError(t, err) diff --git a/versions.yaml b/versions.yaml index 2ff221b97bb5..44730c2ca3ad 100644 --- a/versions.yaml +++ b/versions.yaml @@ -97,6 +97,7 @@ module-sets: - github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver - github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver - github.com/open-telemetry/opentelemetry-collector-contrib/extension/oidcauthextension + - github.com/open-telemetry/opentelemetry-collector-contrib/extension/opampcustommessages - github.com/open-telemetry/opentelemetry-collector-contrib/extension/opampextension - github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension - github.com/open-telemetry/opentelemetry-collector-contrib/extension/remotetapextension From bc2ff482f0669c00a17f8ea2004d8d6d164abd8b 
Mon Sep 17 00:00:00 2001 From: Evan Bradley <11745660+evan-bradley@users.noreply.github.com> Date: Fri, 10 May 2024 09:55:02 -0400 Subject: [PATCH 21/55] [chore] Add contrib confmap providers to otelcontribcol (#32916) **Description:** Contrib hosts two confmap providers which aren't in the local binary. We should add them so we can test them with a live system. cc @Aneurysm9 @driverpt @atoulme --------- Co-authored-by: Evan Bradley --- cmd/configschema/go.mod | 13 ++-- cmd/configschema/go.sum | 39 ++++------- cmd/otelcontribcol/builder-config.yaml | 11 +++ cmd/otelcontribcol/go.mod | 20 ++++-- cmd/otelcontribcol/go.sum | 41 ++++------- cmd/otelcontribcol/main.go | 5 ++ go.mod | 13 ++-- go.sum | 39 ++++------- receiver/snowflakereceiver/go.mod | 30 ++++---- receiver/snowflakereceiver/go.sum | 95 ++++++++++++-------------- 10 files changed, 136 insertions(+), 170 deletions(-) diff --git a/cmd/configschema/go.mod b/cmd/configschema/go.mod index 28f840601974..39722e6aeec8 100644 --- a/cmd/configschema/go.mod +++ b/cmd/configschema/go.mod @@ -312,17 +312,17 @@ require ( github.com/aws/aws-sdk-go-v2/config v1.27.11 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.17.11 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 // indirect - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.59 // indirect + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.23 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.26 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 // indirect github.com/aws/aws-sdk-go-v2/service/kinesis v1.27.4 // indirect - github.com/aws/aws-sdk-go-v2/service/s3 v1.31.0 // indirect + github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 // indirect github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.29.6 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.20.5 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 // indirect @@ -380,7 +380,6 @@ require ( github.com/facebook/time v0.0.0-20240501094127-b56da860b6c1 // indirect github.com/fatih/color v1.15.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/gabriel-vasile/mimetype v1.4.2 // indirect github.com/getsentry/sentry-go v0.27.0 // indirect @@ -639,7 +638,7 @@ require ( github.com/signalfx/sapm-proto v0.14.0 // indirect github.com/sijms/go-ora/v2 v2.8.16 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/snowflakedb/gosnowflake v1.9.0 // indirect + github.com/snowflakedb/gosnowflake v1.10.1-0.20240509141315-5570db2126fe // indirect github.com/soheilhy/cmux v0.1.5 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect diff --git a/cmd/configschema/go.sum b/cmd/configschema/go.sum index 
8a2b1ba43e42..9f9d4f0297a5 100644 --- a/cmd/configschema/go.sum +++ b/cmd/configschema/go.sum @@ -1007,67 +1007,54 @@ github.com/aws/aws-sdk-go v1.44.263/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8 github.com/aws/aws-sdk-go v1.52.4 h1:9VsBVJ2TKf8xPP3+yIPGSYcEBIEymXsJzQoFgQuyvA0= github.com/aws/aws-sdk-go v1.52.4/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= -github.com/aws/aws-sdk-go-v2 v1.17.7/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2 v1.18.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA= github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10/go.mod h1:VeTZetY5KRJLuD/7fkQXMU6Mw7H5m/KP2J5Iy9osMno= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 h1:x6xsQXGSmW6frevwDA+vi/wqhp1ct18mVXYN08/93to= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg= -github.com/aws/aws-sdk-go-v2/config v1.18.19/go.mod h1:XvTmGMY8d52ougvakOv1RpiTLPz9dlG/OQHsKU/cMmY= github.com/aws/aws-sdk-go-v2/config v1.18.25/go.mod h1:dZnYpD5wTW/dQF0rRNLVypB396zWCcPiBIvdvSWHEg4= github.com/aws/aws-sdk-go-v2/config v1.27.11 h1:f47rANd2LQEYHda2ddSCKYId18/8BhSRM4BULGmfgNA= github.com/aws/aws-sdk-go-v2/config v1.27.11/go.mod h1:SMsV78RIOYdve1vf36z8LmnszlRWkwMQtomCAI0/mIE= -github.com/aws/aws-sdk-go-v2/credentials v1.13.18/go.mod h1:vnwlwjIe+3XJPBYKu1et30ZPABG3VaXJYr8ryohpIyM= github.com/aws/aws-sdk-go-v2/credentials v1.13.24/go.mod h1:jYPYi99wUOPIFi0rhiOvXeSEReVOzBqFNOX5bXYoG2o= github.com/aws/aws-sdk-go-v2/credentials v1.17.11 h1:YuIB1dJNf1Re822rriUOTxopaHHvIq0l/pX3fwO+Tzs= github.com/aws/aws-sdk-go-v2/credentials v1.17.11/go.mod h1:AQtFPsDH9bI2O+71anW6EKL+NcD7LG3dpKGMV4SShgo= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.1/go.mod h1:lfUx8puBRdM5lVVMQlwt2v+ofiG/X6Ms+dy0UkG/kXw= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.3/go.mod h1:4Q0UFP0YJf0NrsEuEYHpM9fTSEVnD16Z3uyEF7J9JGM= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 h1:FVJ0r5XTHSmIHJV6KuDmdYhEpvlHpiSd38RQWhut5J4= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1/go.mod h1:zusuAeqezXzAB24LGuzuekqMAEgWkVYukBec3kr3jUg= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.59 h1:E3Y+OfzOK1+rmRo/K2G0ml8Vs+Xqk0kOnf4nS0kUtBc= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.59/go.mod h1:1M4PLSBUVfBI0aP+C9XI7SM6kZPCGYyI6izWz0TGprE= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.31/go.mod h1:QT0BqUvX1Bh2ABdTGnjqEjvjzrCfIniM9Sc8zn9Yndo= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15 h1:7Zwtt/lP3KNRkeZre7soMELMGNoBrutx8nobg1jKWmo= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15/go.mod h1:436h2adoHb57yd+8W+gYPrrA9U/R/SuAuOO42Ushzhw= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.33/go.mod h1:7i0PF1ME/2eUPFcjkVIwq+DOygHEoK92t5cDqNgYbIw= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 h1:aw39xVGeRWlWx9EzGVnhOR4yOjQDHPQ6o6NmBlscyQg= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5/go.mod h1:FSaRudD0dXiMPK2UjknVwwTYyZMRsHv3TtkabsZih5I= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.25/go.mod h1:zBHOPwhBc3FlQjQJE/D3IfPWiWaQmT06Vq9aNukDo0k= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.27/go.mod h1:UrHnn3QV/d0pBZ6QBAEQcqFLf8FAzLmoUfPVIueOvoM= 
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 h1:PG1F3OD1szkuQPzDw3CIQsRIrtTlUC3lP84taWzHlq0= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5/go.mod h1:jU1li6RFryMz+so64PpKtudI+QzbKoIEivqdf6LNpOc= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.32/go.mod h1:XGhIBZDEgfqmFIugclZ6FU7v75nHhBDtzuB4xB/tEi4= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.34/go.mod h1:Etz2dj6UHYuw+Xw830KfzCfWGMzqvUTCjUj5b76GVDc= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.23 h1:DWYZIsyqagnWL00f8M/SOr9fN063OEQWn9LLTbdYXsk= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.23/go.mod h1:uIiFgURZbACBEQJfqTZPb/jxO7R+9LeoHUFudtIdeQI= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11/go.mod h1:iV4q2hsqtNECrfmlXyord9u4zyuFEJX9eLgLpSPzWA8= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 h1:81KE7vaZzrl7yHBYHVEzYB8sypz11NMOZ40YlWvPxsU= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5/go.mod h1:LIt2rg7Mcgn09Ygbdh/RdIm0rQ+3BNkbP1gyVMFtRK0= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.26 h1:CeuSeq/8FnYpPtnuIeLQEEvDv9zUjneuYi8EghMBdwQ= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.26/go.mod h1:2UqAAwMUXKeRkAHIlDJqvMVgOWkUi/AUXPk/YIe+Dg4= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.25/go.mod h1:/95IA+0lMnzW6XzqYJRpjjsAbKEORVeO0anQqjd2CNU= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 h1:ZMeFZ5yk+Ek+jNr1+uwCd2tG89t6oTS5yVWpa6yy2es= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7/go.mod h1:mxV05U+4JiHqIpGqqYXOHLPKUC6bDXC44bsUhNjOEwY= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.27/go.mod h1:EOwBD4J4S5qYszS5/3DpkejfuK+Z5/1uzICfPaZLtqw= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 h1:ogRAwT1/gxJBcSWDMZlgyFUM962F51A5CRhDLbxLdmo= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7/go.mod h1:YCsIZhXfRPLFFCl5xxY+1T9RKzOKjCut+28JSX2DnAk= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.0 h1:e2ooMhpYGhDnBfSvIyusvAwX7KexuZaHbQY2Dyei7VU= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.0/go.mod h1:bh2E0CXKZsQN+faiKVqC40vfNMAWheoULBCnEgO9K+8= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 h1:f9RyWNtS8oH7cZlbn+/JNPpjUk5+5fLd5lM9M0i49Ys= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5/go.mod h1:h5CoMZV2VF297/VLhRhO1WF+XYWOzXo+4HsObA4HjBQ= github.com/aws/aws-sdk-go-v2/service/kinesis v1.27.4 h1:Oe8awBiS/iitcsRJB5+DHa3iCxoA0KwJJf0JNrYMINY= github.com/aws/aws-sdk-go-v2/service/kinesis v1.27.4/go.mod h1:RCZCSFbieSgNG1RKegO26opXV4EXyef/vNBVJsUyHuw= -github.com/aws/aws-sdk-go-v2/service/s3 v1.31.0 h1:B1G2pSPvbAtQjilPq+Y7jLIzCOwKzuVEl+aBBaNG0AQ= -github.com/aws/aws-sdk-go-v2/service/s3 v1.31.0/go.mod h1:ncltU6n4Nof5uJttDtcNQ537uNuwYqsZZQcpkd2/GUQ= +github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 h1:6cnno47Me9bRykw9AEv9zkXE+5or7jz8TsskTTccbgc= +github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1/go.mod h1:qmdkIIAC+GCLASF7R2whgNrJADz0QZPX+Seiw/i4S3o= github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.29.6 h1:PUdCX18Ka+NsGyv+EZHjbbaRjEFP74h7wpZ36n1JBxI= 
github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.29.6/go.mod h1:3pzLFJnbjkymz6RdZ963DuvMR9rzrKMXrlbteSk4Sxc= -github.com/aws/aws-sdk-go-v2/service/sso v1.12.6/go.mod h1:Y1VOmit/Fn6Tz1uFAeCO6Q7M2fmfXSCLeL5INVYsLuY= github.com/aws/aws-sdk-go-v2/service/sso v1.12.10/go.mod h1:ouy2P4z6sJN70fR3ka3wD3Ro3KezSxU6eKGQI2+2fjI= github.com/aws/aws-sdk-go-v2/service/sso v1.20.5 h1:vN8hEbpRnL7+Hopy9dzmRle1xmDc7o8tmY0klsr175w= github.com/aws/aws-sdk-go-v2/service/sso v1.20.5/go.mod h1:qGzynb/msuZIE8I75DVRCUXw3o3ZyBmUvMwQ2t/BrGM= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.6/go.mod h1:Lh/bc9XUf8CfOY6Jp5aIkQtN+j1mc+nExc+KXj9jx2s= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.10/go.mod h1:AFvkxc8xfBe8XA+5St5XIHHrQQtkxqrRincx4hmMHOk= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 h1:Jux+gDDyi1Lruk+KHF91tK2KCuY61kzoCpvtvJJBtOE= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4/go.mod h1:mUYPBhaF2lGiukDEjJX2BLRRKTmoUSitGDUgM4tRxak= -github.com/aws/aws-sdk-go-v2/service/sts v1.18.7/go.mod h1:JuTnSoeePXmMVe9G8NcjjwgOKEfZ4cOjMuT2IBT/2eI= github.com/aws/aws-sdk-go-v2/service/sts v1.19.0/go.mod h1:BgQOMsg8av8jset59jelyPW7NoZcZXLVpDsXunGDrk8= github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 h1:cwIxeBttqPN3qkaAjcEcsh8NYr8n2HZPkcKgPAi1phU= github.com/aws/aws-sdk-go-v2/service/sts v1.28.6/go.mod h1:FZf1/nKNEkHdGGJP/cI2MoIMquumuRK6ol3QQJNDxmw= @@ -1311,8 +1298,6 @@ github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.5+incompatible h1:/l4kBbb4/vGSsdtB5nUe8L7B9mImVMaBPw9L/0TBHU8= -github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= @@ -2227,8 +2212,8 @@ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVs github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/snowflakedb/gosnowflake v1.9.0 h1:s2ZdwFxFfpqwa5CqlhnzRESnLmwU3fED6zyNOJHFBQA= -github.com/snowflakedb/gosnowflake v1.9.0/go.mod h1:4ZgHxVf2OKwecx07WjfyAMr0gn8Qj4yvwAo68Og8wsU= +github.com/snowflakedb/gosnowflake v1.10.1-0.20240509141315-5570db2126fe h1:tyqmtuppkCBKehjrsrGgcO7xsNBEGWgIlgm9fq/4X4U= +github.com/snowflakedb/gosnowflake v1.10.1-0.20240509141315-5570db2126fe/go.mod h1:hvc58mU03qg78mSz5z17/qnzI56hOdYYK2txWbM0hN0= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= diff --git a/cmd/otelcontribcol/builder-config.yaml b/cmd/otelcontribcol/builder-config.yaml index c8868207d191..8216bb082efc 100644 --- 
a/cmd/otelcontribcol/builder-config.yaml +++ b/cmd/otelcontribcol/builder-config.yaml @@ -220,6 +220,15 @@ connectors: - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/connector/servicegraphconnector v0.100.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.100.0 +providers: + - gomod: go.opentelemetry.io/collector/confmap/provider/envprovider v0.100.0 + - gomod: go.opentelemetry.io/collector/confmap/provider/fileprovider v0.100.0 + - gomod: go.opentelemetry.io/collector/confmap/provider/httpprovider v0.100.0 + - gomod: go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.100.0 + - gomod: go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.100.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/confmap/provider/s3provider v0.100.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/confmap/provider/secretsmanagerprovider v0.100.0 + replaces: - github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage => ../../extension/storage - github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/dbstorage => ../../extension/storage/dbstorage @@ -461,3 +470,5 @@ replaces: - github.com/open-telemetry/opentelemetry-collector-contrib/extension/googleclientauthextension => ../../extension/googleclientauthextension - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver => ../../receiver/splunkenterprisereceiver - github.com/open-telemetry/opentelemetry-collector-contrib/extension/opampcustommessages => ../../extension/opampcustommessages + - github.com/open-telemetry/opentelemetry-collector-contrib/confmap/provider/s3provider => ../../confmap/provider/s3provider + - github.com/open-telemetry/opentelemetry-collector-contrib/confmap/provider/secretsmanagerprovider => ../../confmap/provider/secretsmanagerprovider diff --git a/cmd/otelcontribcol/go.mod b/cmd/otelcontribcol/go.mod index 084aa0e0dc20..50e3327874ff 100644 --- a/cmd/otelcontribcol/go.mod +++ b/cmd/otelcontribcol/go.mod @@ -7,6 +7,8 @@ go 1.21.0 toolchain go1.21.10 require ( + github.com/open-telemetry/opentelemetry-collector-contrib/confmap/provider/s3provider v0.100.0 + github.com/open-telemetry/opentelemetry-collector-contrib/confmap/provider/secretsmanagerprovider v0.100.0 github.com/open-telemetry/opentelemetry-collector-contrib/connector/countconnector v0.100.0 github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.100.0 github.com/open-telemetry/opentelemetry-collector-contrib/connector/exceptionsconnector v0.100.0 @@ -380,17 +382,18 @@ require ( github.com/aws/aws-sdk-go-v2/config v1.27.11 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.17.11 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 // indirect - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.59 // indirect + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.23 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.26 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 
// indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 // indirect github.com/aws/aws-sdk-go-v2/service/kinesis v1.27.4 // indirect - github.com/aws/aws-sdk-go-v2/service/s3 v1.31.0 // indirect + github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 // indirect + github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.28.6 // indirect github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.29.6 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.20.5 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 // indirect @@ -448,7 +451,6 @@ require ( github.com/facebook/time v0.0.0-20240501094127-b56da860b6c1 // indirect github.com/fatih/color v1.15.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/gabriel-vasile/mimetype v1.4.2 // indirect github.com/getsentry/sentry-go v0.27.0 // indirect @@ -687,7 +689,7 @@ require ( github.com/signalfx/sapm-proto v0.14.0 // indirect github.com/sijms/go-ora/v2 v2.8.16 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/snowflakedb/gosnowflake v1.9.0 // indirect + github.com/snowflakedb/gosnowflake v1.10.1-0.20240509141315-5570db2126fe // indirect github.com/soheilhy/cmux v0.1.5 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect @@ -1287,3 +1289,7 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/extension/goog replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver => ../../receiver/splunkenterprisereceiver replace github.com/open-telemetry/opentelemetry-collector-contrib/extension/opampcustommessages => ../../extension/opampcustommessages + +replace github.com/open-telemetry/opentelemetry-collector-contrib/confmap/provider/s3provider => ../../confmap/provider/s3provider + +replace github.com/open-telemetry/opentelemetry-collector-contrib/confmap/provider/secretsmanagerprovider => ../../confmap/provider/secretsmanagerprovider diff --git a/cmd/otelcontribcol/go.sum b/cmd/otelcontribcol/go.sum index 781d867d8bca..412e743171b6 100644 --- a/cmd/otelcontribcol/go.sum +++ b/cmd/otelcontribcol/go.sum @@ -1008,67 +1008,56 @@ github.com/aws/aws-sdk-go v1.44.263/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8 github.com/aws/aws-sdk-go v1.52.4 h1:9VsBVJ2TKf8xPP3+yIPGSYcEBIEymXsJzQoFgQuyvA0= github.com/aws/aws-sdk-go v1.52.4/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= -github.com/aws/aws-sdk-go-v2 v1.17.7/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2 v1.18.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA= github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10/go.mod h1:VeTZetY5KRJLuD/7fkQXMU6Mw7H5m/KP2J5Iy9osMno= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 h1:x6xsQXGSmW6frevwDA+vi/wqhp1ct18mVXYN08/93to= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg= -github.com/aws/aws-sdk-go-v2/config v1.18.19/go.mod 
h1:XvTmGMY8d52ougvakOv1RpiTLPz9dlG/OQHsKU/cMmY= github.com/aws/aws-sdk-go-v2/config v1.18.25/go.mod h1:dZnYpD5wTW/dQF0rRNLVypB396zWCcPiBIvdvSWHEg4= github.com/aws/aws-sdk-go-v2/config v1.27.11 h1:f47rANd2LQEYHda2ddSCKYId18/8BhSRM4BULGmfgNA= github.com/aws/aws-sdk-go-v2/config v1.27.11/go.mod h1:SMsV78RIOYdve1vf36z8LmnszlRWkwMQtomCAI0/mIE= -github.com/aws/aws-sdk-go-v2/credentials v1.13.18/go.mod h1:vnwlwjIe+3XJPBYKu1et30ZPABG3VaXJYr8ryohpIyM= github.com/aws/aws-sdk-go-v2/credentials v1.13.24/go.mod h1:jYPYi99wUOPIFi0rhiOvXeSEReVOzBqFNOX5bXYoG2o= github.com/aws/aws-sdk-go-v2/credentials v1.17.11 h1:YuIB1dJNf1Re822rriUOTxopaHHvIq0l/pX3fwO+Tzs= github.com/aws/aws-sdk-go-v2/credentials v1.17.11/go.mod h1:AQtFPsDH9bI2O+71anW6EKL+NcD7LG3dpKGMV4SShgo= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.1/go.mod h1:lfUx8puBRdM5lVVMQlwt2v+ofiG/X6Ms+dy0UkG/kXw= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.3/go.mod h1:4Q0UFP0YJf0NrsEuEYHpM9fTSEVnD16Z3uyEF7J9JGM= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 h1:FVJ0r5XTHSmIHJV6KuDmdYhEpvlHpiSd38RQWhut5J4= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1/go.mod h1:zusuAeqezXzAB24LGuzuekqMAEgWkVYukBec3kr3jUg= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.59 h1:E3Y+OfzOK1+rmRo/K2G0ml8Vs+Xqk0kOnf4nS0kUtBc= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.59/go.mod h1:1M4PLSBUVfBI0aP+C9XI7SM6kZPCGYyI6izWz0TGprE= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.31/go.mod h1:QT0BqUvX1Bh2ABdTGnjqEjvjzrCfIniM9Sc8zn9Yndo= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15 h1:7Zwtt/lP3KNRkeZre7soMELMGNoBrutx8nobg1jKWmo= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15/go.mod h1:436h2adoHb57yd+8W+gYPrrA9U/R/SuAuOO42Ushzhw= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.33/go.mod h1:7i0PF1ME/2eUPFcjkVIwq+DOygHEoK92t5cDqNgYbIw= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 h1:aw39xVGeRWlWx9EzGVnhOR4yOjQDHPQ6o6NmBlscyQg= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5/go.mod h1:FSaRudD0dXiMPK2UjknVwwTYyZMRsHv3TtkabsZih5I= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.25/go.mod h1:zBHOPwhBc3FlQjQJE/D3IfPWiWaQmT06Vq9aNukDo0k= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.27/go.mod h1:UrHnn3QV/d0pBZ6QBAEQcqFLf8FAzLmoUfPVIueOvoM= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 h1:PG1F3OD1szkuQPzDw3CIQsRIrtTlUC3lP84taWzHlq0= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5/go.mod h1:jU1li6RFryMz+so64PpKtudI+QzbKoIEivqdf6LNpOc= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.32/go.mod h1:XGhIBZDEgfqmFIugclZ6FU7v75nHhBDtzuB4xB/tEi4= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.34/go.mod h1:Etz2dj6UHYuw+Xw830KfzCfWGMzqvUTCjUj5b76GVDc= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.23 h1:DWYZIsyqagnWL00f8M/SOr9fN063OEQWn9LLTbdYXsk= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.23/go.mod h1:uIiFgURZbACBEQJfqTZPb/jxO7R+9LeoHUFudtIdeQI= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11/go.mod h1:iV4q2hsqtNECrfmlXyord9u4zyuFEJX9eLgLpSPzWA8= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 h1:81KE7vaZzrl7yHBYHVEzYB8sypz11NMOZ40YlWvPxsU= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5/go.mod h1:LIt2rg7Mcgn09Ygbdh/RdIm0rQ+3BNkbP1gyVMFtRK0= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 
h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.26 h1:CeuSeq/8FnYpPtnuIeLQEEvDv9zUjneuYi8EghMBdwQ= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.26/go.mod h1:2UqAAwMUXKeRkAHIlDJqvMVgOWkUi/AUXPk/YIe+Dg4= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.25/go.mod h1:/95IA+0lMnzW6XzqYJRpjjsAbKEORVeO0anQqjd2CNU= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 h1:ZMeFZ5yk+Ek+jNr1+uwCd2tG89t6oTS5yVWpa6yy2es= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7/go.mod h1:mxV05U+4JiHqIpGqqYXOHLPKUC6bDXC44bsUhNjOEwY= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.27/go.mod h1:EOwBD4J4S5qYszS5/3DpkejfuK+Z5/1uzICfPaZLtqw= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 h1:ogRAwT1/gxJBcSWDMZlgyFUM962F51A5CRhDLbxLdmo= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7/go.mod h1:YCsIZhXfRPLFFCl5xxY+1T9RKzOKjCut+28JSX2DnAk= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.0 h1:e2ooMhpYGhDnBfSvIyusvAwX7KexuZaHbQY2Dyei7VU= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.0/go.mod h1:bh2E0CXKZsQN+faiKVqC40vfNMAWheoULBCnEgO9K+8= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 h1:f9RyWNtS8oH7cZlbn+/JNPpjUk5+5fLd5lM9M0i49Ys= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5/go.mod h1:h5CoMZV2VF297/VLhRhO1WF+XYWOzXo+4HsObA4HjBQ= github.com/aws/aws-sdk-go-v2/service/kinesis v1.27.4 h1:Oe8awBiS/iitcsRJB5+DHa3iCxoA0KwJJf0JNrYMINY= github.com/aws/aws-sdk-go-v2/service/kinesis v1.27.4/go.mod h1:RCZCSFbieSgNG1RKegO26opXV4EXyef/vNBVJsUyHuw= -github.com/aws/aws-sdk-go-v2/service/s3 v1.31.0 h1:B1G2pSPvbAtQjilPq+Y7jLIzCOwKzuVEl+aBBaNG0AQ= -github.com/aws/aws-sdk-go-v2/service/s3 v1.31.0/go.mod h1:ncltU6n4Nof5uJttDtcNQ537uNuwYqsZZQcpkd2/GUQ= +github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 h1:6cnno47Me9bRykw9AEv9zkXE+5or7jz8TsskTTccbgc= +github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1/go.mod h1:qmdkIIAC+GCLASF7R2whgNrJADz0QZPX+Seiw/i4S3o= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.28.6 h1:TIOEjw0i2yyhmhRry3Oeu9YtiiHWISZ6j/irS1W3gX4= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.28.6/go.mod h1:3Ba++UwWd154xtP4FRX5pUK3Gt4up5sDHCve6kVfE+g= github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.29.6 h1:PUdCX18Ka+NsGyv+EZHjbbaRjEFP74h7wpZ36n1JBxI= github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.29.6/go.mod h1:3pzLFJnbjkymz6RdZ963DuvMR9rzrKMXrlbteSk4Sxc= -github.com/aws/aws-sdk-go-v2/service/sso v1.12.6/go.mod h1:Y1VOmit/Fn6Tz1uFAeCO6Q7M2fmfXSCLeL5INVYsLuY= github.com/aws/aws-sdk-go-v2/service/sso v1.12.10/go.mod h1:ouy2P4z6sJN70fR3ka3wD3Ro3KezSxU6eKGQI2+2fjI= github.com/aws/aws-sdk-go-v2/service/sso v1.20.5 h1:vN8hEbpRnL7+Hopy9dzmRle1xmDc7o8tmY0klsr175w= github.com/aws/aws-sdk-go-v2/service/sso v1.20.5/go.mod h1:qGzynb/msuZIE8I75DVRCUXw3o3ZyBmUvMwQ2t/BrGM= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.6/go.mod h1:Lh/bc9XUf8CfOY6Jp5aIkQtN+j1mc+nExc+KXj9jx2s= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.10/go.mod h1:AFvkxc8xfBe8XA+5St5XIHHrQQtkxqrRincx4hmMHOk= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 h1:Jux+gDDyi1Lruk+KHF91tK2KCuY61kzoCpvtvJJBtOE= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4/go.mod h1:mUYPBhaF2lGiukDEjJX2BLRRKTmoUSitGDUgM4tRxak= -github.com/aws/aws-sdk-go-v2/service/sts v1.18.7/go.mod 
h1:JuTnSoeePXmMVe9G8NcjjwgOKEfZ4cOjMuT2IBT/2eI= github.com/aws/aws-sdk-go-v2/service/sts v1.19.0/go.mod h1:BgQOMsg8av8jset59jelyPW7NoZcZXLVpDsXunGDrk8= github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 h1:cwIxeBttqPN3qkaAjcEcsh8NYr8n2HZPkcKgPAi1phU= github.com/aws/aws-sdk-go-v2/service/sts v1.28.6/go.mod h1:FZf1/nKNEkHdGGJP/cI2MoIMquumuRK6ol3QQJNDxmw= @@ -1310,8 +1299,6 @@ github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.5+incompatible h1:/l4kBbb4/vGSsdtB5nUe8L7B9mImVMaBPw9L/0TBHU8= -github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= @@ -2230,8 +2217,8 @@ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVs github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/snowflakedb/gosnowflake v1.9.0 h1:s2ZdwFxFfpqwa5CqlhnzRESnLmwU3fED6zyNOJHFBQA= -github.com/snowflakedb/gosnowflake v1.9.0/go.mod h1:4ZgHxVf2OKwecx07WjfyAMr0gn8Qj4yvwAo68Og8wsU= +github.com/snowflakedb/gosnowflake v1.10.1-0.20240509141315-5570db2126fe h1:tyqmtuppkCBKehjrsrGgcO7xsNBEGWgIlgm9fq/4X4U= +github.com/snowflakedb/gosnowflake v1.10.1-0.20240509141315-5570db2126fe/go.mod h1:hvc58mU03qg78mSz5z17/qnzI56hOdYYK2txWbM0hN0= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= diff --git a/cmd/otelcontribcol/main.go b/cmd/otelcontribcol/main.go index 59d8007a7305..e611f7a4faf6 100644 --- a/cmd/otelcontribcol/main.go +++ b/cmd/otelcontribcol/main.go @@ -15,6 +15,9 @@ import ( httpsprovider "go.opentelemetry.io/collector/confmap/provider/httpsprovider" yamlprovider "go.opentelemetry.io/collector/confmap/provider/yamlprovider" "go.opentelemetry.io/collector/otelcol" + + s3provider "github.com/open-telemetry/opentelemetry-collector-contrib/confmap/provider/s3provider" + secretsmanagerprovider "github.com/open-telemetry/opentelemetry-collector-contrib/confmap/provider/secretsmanagerprovider" ) func main() { @@ -35,6 +38,8 @@ func main() { httpprovider.NewFactory(), httpsprovider.NewFactory(), yamlprovider.NewFactory(), + s3provider.NewFactory(), + secretsmanagerprovider.NewFactory(), }, ConverterFactories: []confmap.ConverterFactory{ expandconverter.NewFactory(), diff --git a/go.mod b/go.mod index e534890bbd9c..efc66a3b0020 100644 --- a/go.mod +++ b/go.mod @@ -331,17 +331,17 @@ require ( github.com/aws/aws-sdk-go-v2/config v1.27.11 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.17.11 // indirect 
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 // indirect - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.59 // indirect + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.23 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.26 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 // indirect github.com/aws/aws-sdk-go-v2/service/kinesis v1.27.4 // indirect - github.com/aws/aws-sdk-go-v2/service/s3 v1.31.0 // indirect + github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 // indirect github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.29.6 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.20.5 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 // indirect @@ -399,7 +399,6 @@ require ( github.com/facebook/time v0.0.0-20240501094127-b56da860b6c1 // indirect github.com/fatih/color v1.15.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/gabriel-vasile/mimetype v1.4.2 // indirect github.com/getsentry/sentry-go v0.27.0 // indirect @@ -639,7 +638,7 @@ require ( github.com/signalfx/sapm-proto v0.14.0 // indirect github.com/sijms/go-ora/v2 v2.8.16 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/snowflakedb/gosnowflake v1.9.0 // indirect + github.com/snowflakedb/gosnowflake v1.10.1-0.20240509141315-5570db2126fe // indirect github.com/soheilhy/cmux v0.1.5 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect diff --git a/go.sum b/go.sum index f2b8e860463e..fbf5ecf8b8fe 100644 --- a/go.sum +++ b/go.sum @@ -1009,67 +1009,54 @@ github.com/aws/aws-sdk-go v1.44.263/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8 github.com/aws/aws-sdk-go v1.52.4 h1:9VsBVJ2TKf8xPP3+yIPGSYcEBIEymXsJzQoFgQuyvA0= github.com/aws/aws-sdk-go v1.52.4/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= -github.com/aws/aws-sdk-go-v2 v1.17.7/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2 v1.18.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA= github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10/go.mod h1:VeTZetY5KRJLuD/7fkQXMU6Mw7H5m/KP2J5Iy9osMno= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 h1:x6xsQXGSmW6frevwDA+vi/wqhp1ct18mVXYN08/93to= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg= -github.com/aws/aws-sdk-go-v2/config v1.18.19/go.mod h1:XvTmGMY8d52ougvakOv1RpiTLPz9dlG/OQHsKU/cMmY= 
github.com/aws/aws-sdk-go-v2/config v1.18.25/go.mod h1:dZnYpD5wTW/dQF0rRNLVypB396zWCcPiBIvdvSWHEg4= github.com/aws/aws-sdk-go-v2/config v1.27.11 h1:f47rANd2LQEYHda2ddSCKYId18/8BhSRM4BULGmfgNA= github.com/aws/aws-sdk-go-v2/config v1.27.11/go.mod h1:SMsV78RIOYdve1vf36z8LmnszlRWkwMQtomCAI0/mIE= -github.com/aws/aws-sdk-go-v2/credentials v1.13.18/go.mod h1:vnwlwjIe+3XJPBYKu1et30ZPABG3VaXJYr8ryohpIyM= github.com/aws/aws-sdk-go-v2/credentials v1.13.24/go.mod h1:jYPYi99wUOPIFi0rhiOvXeSEReVOzBqFNOX5bXYoG2o= github.com/aws/aws-sdk-go-v2/credentials v1.17.11 h1:YuIB1dJNf1Re822rriUOTxopaHHvIq0l/pX3fwO+Tzs= github.com/aws/aws-sdk-go-v2/credentials v1.17.11/go.mod h1:AQtFPsDH9bI2O+71anW6EKL+NcD7LG3dpKGMV4SShgo= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.1/go.mod h1:lfUx8puBRdM5lVVMQlwt2v+ofiG/X6Ms+dy0UkG/kXw= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.3/go.mod h1:4Q0UFP0YJf0NrsEuEYHpM9fTSEVnD16Z3uyEF7J9JGM= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 h1:FVJ0r5XTHSmIHJV6KuDmdYhEpvlHpiSd38RQWhut5J4= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1/go.mod h1:zusuAeqezXzAB24LGuzuekqMAEgWkVYukBec3kr3jUg= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.59 h1:E3Y+OfzOK1+rmRo/K2G0ml8Vs+Xqk0kOnf4nS0kUtBc= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.59/go.mod h1:1M4PLSBUVfBI0aP+C9XI7SM6kZPCGYyI6izWz0TGprE= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.31/go.mod h1:QT0BqUvX1Bh2ABdTGnjqEjvjzrCfIniM9Sc8zn9Yndo= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15 h1:7Zwtt/lP3KNRkeZre7soMELMGNoBrutx8nobg1jKWmo= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15/go.mod h1:436h2adoHb57yd+8W+gYPrrA9U/R/SuAuOO42Ushzhw= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.33/go.mod h1:7i0PF1ME/2eUPFcjkVIwq+DOygHEoK92t5cDqNgYbIw= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 h1:aw39xVGeRWlWx9EzGVnhOR4yOjQDHPQ6o6NmBlscyQg= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5/go.mod h1:FSaRudD0dXiMPK2UjknVwwTYyZMRsHv3TtkabsZih5I= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.25/go.mod h1:zBHOPwhBc3FlQjQJE/D3IfPWiWaQmT06Vq9aNukDo0k= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.27/go.mod h1:UrHnn3QV/d0pBZ6QBAEQcqFLf8FAzLmoUfPVIueOvoM= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 h1:PG1F3OD1szkuQPzDw3CIQsRIrtTlUC3lP84taWzHlq0= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5/go.mod h1:jU1li6RFryMz+so64PpKtudI+QzbKoIEivqdf6LNpOc= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.32/go.mod h1:XGhIBZDEgfqmFIugclZ6FU7v75nHhBDtzuB4xB/tEi4= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.34/go.mod h1:Etz2dj6UHYuw+Xw830KfzCfWGMzqvUTCjUj5b76GVDc= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.23 h1:DWYZIsyqagnWL00f8M/SOr9fN063OEQWn9LLTbdYXsk= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.23/go.mod h1:uIiFgURZbACBEQJfqTZPb/jxO7R+9LeoHUFudtIdeQI= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11/go.mod h1:iV4q2hsqtNECrfmlXyord9u4zyuFEJX9eLgLpSPzWA8= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 h1:81KE7vaZzrl7yHBYHVEzYB8sypz11NMOZ40YlWvPxsU= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5/go.mod h1:LIt2rg7Mcgn09Ygbdh/RdIm0rQ+3BNkbP1gyVMFtRK0= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs= 
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.26 h1:CeuSeq/8FnYpPtnuIeLQEEvDv9zUjneuYi8EghMBdwQ= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.26/go.mod h1:2UqAAwMUXKeRkAHIlDJqvMVgOWkUi/AUXPk/YIe+Dg4= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.25/go.mod h1:/95IA+0lMnzW6XzqYJRpjjsAbKEORVeO0anQqjd2CNU= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 h1:ZMeFZ5yk+Ek+jNr1+uwCd2tG89t6oTS5yVWpa6yy2es= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7/go.mod h1:mxV05U+4JiHqIpGqqYXOHLPKUC6bDXC44bsUhNjOEwY= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.27/go.mod h1:EOwBD4J4S5qYszS5/3DpkejfuK+Z5/1uzICfPaZLtqw= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 h1:ogRAwT1/gxJBcSWDMZlgyFUM962F51A5CRhDLbxLdmo= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7/go.mod h1:YCsIZhXfRPLFFCl5xxY+1T9RKzOKjCut+28JSX2DnAk= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.0 h1:e2ooMhpYGhDnBfSvIyusvAwX7KexuZaHbQY2Dyei7VU= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.0/go.mod h1:bh2E0CXKZsQN+faiKVqC40vfNMAWheoULBCnEgO9K+8= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 h1:f9RyWNtS8oH7cZlbn+/JNPpjUk5+5fLd5lM9M0i49Ys= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5/go.mod h1:h5CoMZV2VF297/VLhRhO1WF+XYWOzXo+4HsObA4HjBQ= github.com/aws/aws-sdk-go-v2/service/kinesis v1.27.4 h1:Oe8awBiS/iitcsRJB5+DHa3iCxoA0KwJJf0JNrYMINY= github.com/aws/aws-sdk-go-v2/service/kinesis v1.27.4/go.mod h1:RCZCSFbieSgNG1RKegO26opXV4EXyef/vNBVJsUyHuw= -github.com/aws/aws-sdk-go-v2/service/s3 v1.31.0 h1:B1G2pSPvbAtQjilPq+Y7jLIzCOwKzuVEl+aBBaNG0AQ= -github.com/aws/aws-sdk-go-v2/service/s3 v1.31.0/go.mod h1:ncltU6n4Nof5uJttDtcNQ537uNuwYqsZZQcpkd2/GUQ= +github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 h1:6cnno47Me9bRykw9AEv9zkXE+5or7jz8TsskTTccbgc= +github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1/go.mod h1:qmdkIIAC+GCLASF7R2whgNrJADz0QZPX+Seiw/i4S3o= github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.29.6 h1:PUdCX18Ka+NsGyv+EZHjbbaRjEFP74h7wpZ36n1JBxI= github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.29.6/go.mod h1:3pzLFJnbjkymz6RdZ963DuvMR9rzrKMXrlbteSk4Sxc= -github.com/aws/aws-sdk-go-v2/service/sso v1.12.6/go.mod h1:Y1VOmit/Fn6Tz1uFAeCO6Q7M2fmfXSCLeL5INVYsLuY= github.com/aws/aws-sdk-go-v2/service/sso v1.12.10/go.mod h1:ouy2P4z6sJN70fR3ka3wD3Ro3KezSxU6eKGQI2+2fjI= github.com/aws/aws-sdk-go-v2/service/sso v1.20.5 h1:vN8hEbpRnL7+Hopy9dzmRle1xmDc7o8tmY0klsr175w= github.com/aws/aws-sdk-go-v2/service/sso v1.20.5/go.mod h1:qGzynb/msuZIE8I75DVRCUXw3o3ZyBmUvMwQ2t/BrGM= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.6/go.mod h1:Lh/bc9XUf8CfOY6Jp5aIkQtN+j1mc+nExc+KXj9jx2s= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.10/go.mod h1:AFvkxc8xfBe8XA+5St5XIHHrQQtkxqrRincx4hmMHOk= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 h1:Jux+gDDyi1Lruk+KHF91tK2KCuY61kzoCpvtvJJBtOE= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4/go.mod h1:mUYPBhaF2lGiukDEjJX2BLRRKTmoUSitGDUgM4tRxak= -github.com/aws/aws-sdk-go-v2/service/sts v1.18.7/go.mod h1:JuTnSoeePXmMVe9G8NcjjwgOKEfZ4cOjMuT2IBT/2eI= github.com/aws/aws-sdk-go-v2/service/sts v1.19.0/go.mod h1:BgQOMsg8av8jset59jelyPW7NoZcZXLVpDsXunGDrk8= github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 h1:cwIxeBttqPN3qkaAjcEcsh8NYr8n2HZPkcKgPAi1phU= 
github.com/aws/aws-sdk-go-v2/service/sts v1.28.6/go.mod h1:FZf1/nKNEkHdGGJP/cI2MoIMquumuRK6ol3QQJNDxmw= @@ -1311,8 +1298,6 @@ github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.5+incompatible h1:/l4kBbb4/vGSsdtB5nUe8L7B9mImVMaBPw9L/0TBHU8= -github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= @@ -2227,8 +2212,8 @@ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVs github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/snowflakedb/gosnowflake v1.9.0 h1:s2ZdwFxFfpqwa5CqlhnzRESnLmwU3fED6zyNOJHFBQA= -github.com/snowflakedb/gosnowflake v1.9.0/go.mod h1:4ZgHxVf2OKwecx07WjfyAMr0gn8Qj4yvwAo68Og8wsU= +github.com/snowflakedb/gosnowflake v1.10.1-0.20240509141315-5570db2126fe h1:tyqmtuppkCBKehjrsrGgcO7xsNBEGWgIlgm9fq/4X4U= +github.com/snowflakedb/gosnowflake v1.10.1-0.20240509141315-5570db2126fe/go.mod h1:hvc58mU03qg78mSz5z17/qnzI56hOdYYK2txWbM0hN0= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= diff --git a/receiver/snowflakereceiver/go.mod b/receiver/snowflakereceiver/go.mod index d64b1222cf38..eef65d113e8d 100644 --- a/receiver/snowflakereceiver/go.mod +++ b/receiver/snowflakereceiver/go.mod @@ -7,7 +7,7 @@ require ( github.com/google/go-cmp v0.6.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.100.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.100.0 - github.com/snowflakedb/gosnowflake v1.9.0 + github.com/snowflakedb/gosnowflake v1.10.1-0.20240509141315-5570db2126fe github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.100.0 go.opentelemetry.io/collector/config/configopaque v1.7.0 @@ -31,25 +31,24 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 // indirect github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c // indirect github.com/apache/arrow/go/v15 v15.0.0 // indirect - github.com/aws/aws-sdk-go-v2 v1.22.2 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.15.2 // indirect - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.59 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.2 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.2 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.23 // indirect - 
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.26 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.0 // indirect - github.com/aws/aws-sdk-go-v2/service/s3 v1.31.0 // indirect - github.com/aws/smithy-go v1.16.0 // indirect + github.com/aws/aws-sdk-go-v2 v1.26.1 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.11 // indirect + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 // indirect + github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 // indirect + github.com/aws/smithy-go v1.20.2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/danieljoos/wincred v1.1.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dvsekhvalnov/jose2go v1.6.0 // indirect - github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect github.com/gabriel-vasile/mimetype v1.4.2 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -57,6 +56,7 @@ require ( github.com/goccy/go-json v0.10.2 // indirect github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/google/flatbuffers v23.5.26+incompatible // indirect github.com/google/uuid v1.6.0 // indirect github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect diff --git a/receiver/snowflakereceiver/go.sum b/receiver/snowflakereceiver/go.sum index a0e8ef4f73d1..8f92562fe0b8 100644 --- a/receiver/snowflakereceiver/go.sum +++ b/receiver/snowflakereceiver/go.sum @@ -20,54 +20,44 @@ github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c h1:RGWPOewvK github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/apache/arrow/go/v15 v15.0.0 h1:1zZACWf85oEZY5/kd9dsQS7i+2G5zVQcbKTHgslqHNA= github.com/apache/arrow/go/v15 v15.0.0/go.mod h1:DGXsR3ajT524njufqf95822i+KTh+yea1jass9YXgjA= -github.com/aws/aws-sdk-go-v2 v1.17.7/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= -github.com/aws/aws-sdk-go-v2 v1.22.2 h1:lV0U8fnhAnPz8YcdmZVV60+tr6CakHzqA6P8T46ExJI= -github.com/aws/aws-sdk-go-v2 v1.22.2/go.mod h1:Kd0OJtkW3Q0M0lUWGszapWjEvrXDzRW+D21JNsroB+c= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 h1:dK82zF6kkPeCo8J1e+tGx4JdvDIQzj7ygIoLg8WMuGs= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10/go.mod h1:VeTZetY5KRJLuD/7fkQXMU6Mw7H5m/KP2J5Iy9osMno= -github.com/aws/aws-sdk-go-v2/config v1.18.19 h1:AqFK6zFNtq4i1EYu+eC7lcKHYnZagMn6SW171la0bGw= -github.com/aws/aws-sdk-go-v2/config v1.18.19/go.mod h1:XvTmGMY8d52ougvakOv1RpiTLPz9dlG/OQHsKU/cMmY= -github.com/aws/aws-sdk-go-v2/credentials v1.13.18/go.mod 
h1:vnwlwjIe+3XJPBYKu1et30ZPABG3VaXJYr8ryohpIyM= -github.com/aws/aws-sdk-go-v2/credentials v1.15.2 h1:rKH7khRMxPdD0u3dHecd0Q7NOVw3EUe7AqdkUOkiOGI= -github.com/aws/aws-sdk-go-v2/credentials v1.15.2/go.mod h1:tXM8wmaeAhfC7nZoCxb0FzM/aRaB1m1WQ7x0qlBLq80= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.1/go.mod h1:lfUx8puBRdM5lVVMQlwt2v+ofiG/X6Ms+dy0UkG/kXw= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.3 h1:G5KawTAkyHH6WyKQCdHiW4h3PmAXNJpOgwKg3H7sDRE= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.3/go.mod h1:hugKmSFnZB+HgNI1sYGT14BUPZkO6alC/e0AWu+0IAQ= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.59 h1:E3Y+OfzOK1+rmRo/K2G0ml8Vs+Xqk0kOnf4nS0kUtBc= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.59/go.mod h1:1M4PLSBUVfBI0aP+C9XI7SM6kZPCGYyI6izWz0TGprE= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.31/go.mod h1:QT0BqUvX1Bh2ABdTGnjqEjvjzrCfIniM9Sc8zn9Yndo= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.2 h1:AaQsr5vvGR7rmeSWBtTCcw16tT9r51mWijuCQhzLnq8= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.2/go.mod h1:o1IiRn7CWocIFTXJjGKJDOwxv1ibL53NpcvcqGWyRBA= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.25/go.mod h1:zBHOPwhBc3FlQjQJE/D3IfPWiWaQmT06Vq9aNukDo0k= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.2 h1:UZx8SXZ0YtzRiALzYAWcjb9Y9hZUR7MBKaBQ5ouOjPs= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.2/go.mod h1:ipuRpcSaklmxR6C39G187TpBAO132gUfleTGccUPs8c= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.32 h1:p5luUImdIqywn6JpQsW3tq5GNOxKmOnEpybzPx+d1lk= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.32/go.mod h1:XGhIBZDEgfqmFIugclZ6FU7v75nHhBDtzuB4xB/tEi4= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.23 h1:DWYZIsyqagnWL00f8M/SOr9fN063OEQWn9LLTbdYXsk= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.23/go.mod h1:uIiFgURZbACBEQJfqTZPb/jxO7R+9LeoHUFudtIdeQI= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 h1:y2+VQzC6Zh2ojtV2LoC0MNwHWc6qXv/j2vrQtlftkdA= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11/go.mod h1:iV4q2hsqtNECrfmlXyord9u4zyuFEJX9eLgLpSPzWA8= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.26 h1:CeuSeq/8FnYpPtnuIeLQEEvDv9zUjneuYi8EghMBdwQ= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.26/go.mod h1:2UqAAwMUXKeRkAHIlDJqvMVgOWkUi/AUXPk/YIe+Dg4= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.25/go.mod h1:/95IA+0lMnzW6XzqYJRpjjsAbKEORVeO0anQqjd2CNU= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.2 h1:h7j73yuAVVjic8pqswh+L/7r2IHP43QwRyOu6zcCDDE= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.2/go.mod h1:H07AHdK5LSy8F7EJUQhoxyiCNkePoHj2D8P2yGTWafo= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.0 h1:e2ooMhpYGhDnBfSvIyusvAwX7KexuZaHbQY2Dyei7VU= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.0/go.mod h1:bh2E0CXKZsQN+faiKVqC40vfNMAWheoULBCnEgO9K+8= -github.com/aws/aws-sdk-go-v2/service/s3 v1.31.0 h1:B1G2pSPvbAtQjilPq+Y7jLIzCOwKzuVEl+aBBaNG0AQ= -github.com/aws/aws-sdk-go-v2/service/s3 v1.31.0/go.mod h1:ncltU6n4Nof5uJttDtcNQ537uNuwYqsZZQcpkd2/GUQ= -github.com/aws/aws-sdk-go-v2/service/sso v1.12.6/go.mod h1:Y1VOmit/Fn6Tz1uFAeCO6Q7M2fmfXSCLeL5INVYsLuY= -github.com/aws/aws-sdk-go-v2/service/sso v1.17.1 h1:km+ZNjtLtpXYf42RdaDZnNHm9s7SYAuDGTafy6nd89A= -github.com/aws/aws-sdk-go-v2/service/sso v1.17.1/go.mod h1:aHBr3pvBSD5MbzOvQtYutyPLLRPbl/y9x86XyJJnUXQ= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.6/go.mod 
h1:Lh/bc9XUf8CfOY6Jp5aIkQtN+j1mc+nExc+KXj9jx2s= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.1 h1:iRFNqZH4a67IqPvK8xxtyQYnyrlsvwmpHOe9r55ggBA= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.1/go.mod h1:pTy5WM+6sNv2tB24JNKFtn6EvciQ5k40ZJ0pq/Iaxj0= -github.com/aws/aws-sdk-go-v2/service/sts v1.18.7/go.mod h1:JuTnSoeePXmMVe9G8NcjjwgOKEfZ4cOjMuT2IBT/2eI= -github.com/aws/aws-sdk-go-v2/service/sts v1.25.1 h1:txgVXIXWPXyqdiVn92BV6a/rgtpX31HYdsOYj0sVQQQ= -github.com/aws/aws-sdk-go-v2/service/sts v1.25.1/go.mod h1:VAiJiNaoP1L89STFlEMgmHX1bKixY+FaP+TpRFrmyZ4= -github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/aws/smithy-go v1.16.0 h1:gJZEH/Fqh+RsvlJ1Zt4tVAtV6bKkp3cC+R6FCZMNzik= -github.com/aws/smithy-go v1.16.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= +github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA= +github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 h1:x6xsQXGSmW6frevwDA+vi/wqhp1ct18mVXYN08/93to= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg= +github.com/aws/aws-sdk-go-v2/config v1.27.11 h1:f47rANd2LQEYHda2ddSCKYId18/8BhSRM4BULGmfgNA= +github.com/aws/aws-sdk-go-v2/config v1.27.11/go.mod h1:SMsV78RIOYdve1vf36z8LmnszlRWkwMQtomCAI0/mIE= +github.com/aws/aws-sdk-go-v2/credentials v1.17.11 h1:YuIB1dJNf1Re822rriUOTxopaHHvIq0l/pX3fwO+Tzs= +github.com/aws/aws-sdk-go-v2/credentials v1.17.11/go.mod h1:AQtFPsDH9bI2O+71anW6EKL+NcD7LG3dpKGMV4SShgo= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 h1:FVJ0r5XTHSmIHJV6KuDmdYhEpvlHpiSd38RQWhut5J4= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1/go.mod h1:zusuAeqezXzAB24LGuzuekqMAEgWkVYukBec3kr3jUg= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15 h1:7Zwtt/lP3KNRkeZre7soMELMGNoBrutx8nobg1jKWmo= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15/go.mod h1:436h2adoHb57yd+8W+gYPrrA9U/R/SuAuOO42Ushzhw= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 h1:aw39xVGeRWlWx9EzGVnhOR4yOjQDHPQ6o6NmBlscyQg= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5/go.mod h1:FSaRudD0dXiMPK2UjknVwwTYyZMRsHv3TtkabsZih5I= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 h1:PG1F3OD1szkuQPzDw3CIQsRIrtTlUC3lP84taWzHlq0= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5/go.mod h1:jU1li6RFryMz+so64PpKtudI+QzbKoIEivqdf6LNpOc= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 h1:81KE7vaZzrl7yHBYHVEzYB8sypz11NMOZ40YlWvPxsU= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5/go.mod h1:LIt2rg7Mcgn09Ygbdh/RdIm0rQ+3BNkbP1gyVMFtRK0= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 h1:ZMeFZ5yk+Ek+jNr1+uwCd2tG89t6oTS5yVWpa6yy2es= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7/go.mod h1:mxV05U+4JiHqIpGqqYXOHLPKUC6bDXC44bsUhNjOEwY= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 h1:ogRAwT1/gxJBcSWDMZlgyFUM962F51A5CRhDLbxLdmo= 
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7/go.mod h1:YCsIZhXfRPLFFCl5xxY+1T9RKzOKjCut+28JSX2DnAk= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 h1:f9RyWNtS8oH7cZlbn+/JNPpjUk5+5fLd5lM9M0i49Ys= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5/go.mod h1:h5CoMZV2VF297/VLhRhO1WF+XYWOzXo+4HsObA4HjBQ= +github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 h1:6cnno47Me9bRykw9AEv9zkXE+5or7jz8TsskTTccbgc= +github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1/go.mod h1:qmdkIIAC+GCLASF7R2whgNrJADz0QZPX+Seiw/i4S3o= +github.com/aws/aws-sdk-go-v2/service/sso v1.20.5 h1:vN8hEbpRnL7+Hopy9dzmRle1xmDc7o8tmY0klsr175w= +github.com/aws/aws-sdk-go-v2/service/sso v1.20.5/go.mod h1:qGzynb/msuZIE8I75DVRCUXw3o3ZyBmUvMwQ2t/BrGM= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 h1:Jux+gDDyi1Lruk+KHF91tK2KCuY61kzoCpvtvJJBtOE= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4/go.mod h1:mUYPBhaF2lGiukDEjJX2BLRRKTmoUSitGDUgM4tRxak= +github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 h1:cwIxeBttqPN3qkaAjcEcsh8NYr8n2HZPkcKgPAi1phU= +github.com/aws/aws-sdk-go-v2/service/sts v1.28.6/go.mod h1:FZf1/nKNEkHdGGJP/cI2MoIMquumuRK6ol3QQJNDxmw= +github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q= +github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= @@ -81,8 +71,6 @@ github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/dvsekhvalnov/jose2go v1.6.0 h1:Y9gnSnP4qEI0+/uQkHvFXeD2PLPJeXEL+ySMEA2EjTY= github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= -github.com/form3tech-oss/jwt-go v3.2.5+incompatible h1:/l4kBbb4/vGSsdtB5nUe8L7B9mImVMaBPw9L/0TBHU8= -github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -100,9 +88,10 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/google/flatbuffers v23.5.26+incompatible h1:M9dgRyhJemaM4Sw8+66GHBu8ioaQmyPLg1b8VwK5WJg= github.com/google/flatbuffers v23.5.26+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -167,8 +156,8 @@ 
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjR github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/snowflakedb/gosnowflake v1.9.0 h1:s2ZdwFxFfpqwa5CqlhnzRESnLmwU3fED6zyNOJHFBQA= -github.com/snowflakedb/gosnowflake v1.9.0/go.mod h1:4ZgHxVf2OKwecx07WjfyAMr0gn8Qj4yvwAo68Og8wsU= +github.com/snowflakedb/gosnowflake v1.10.1-0.20240509141315-5570db2126fe h1:tyqmtuppkCBKehjrsrGgcO7xsNBEGWgIlgm9fq/4X4U= +github.com/snowflakedb/gosnowflake v1.10.1-0.20240509141315-5570db2126fe/go.mod h1:hvc58mU03qg78mSz5z17/qnzI56hOdYYK2txWbM0hN0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= From cf2afd0db91d01e001804efedbb789e6015a4a8b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Juraj=20Mich=C3=A1lek?= Date: Fri, 10 May 2024 16:46:41 +0200 Subject: [PATCH 22/55] chore: remote write exporter retry on 429 (#31924) **Description:** This PR adds an option to retry the remote write requests when the receiving backend responds with 429 http status code, **Link to tracking Issue:** #31032 **Testing:** Added tests covering the case. **Documentation:** Not sure what's the pattern for documenting feature flags. --------- Co-authored-by: Anthony Mirabella Co-authored-by: Pablo Baeyens Co-authored-by: David Ashpole --- ...theus-remote-write-exporter-retry-429.yaml | 27 +++++++++++++++++ .../prometheusremotewriteexporter/README.md | 7 +++++ .../prometheusremotewriteexporter/exporter.go | 9 ++++++ .../exporter_test.go | 29 +++++++++++++++++-- .../prometheusremotewriteexporter/factory.go | 8 +++++ exporter/prometheusremotewriteexporter/go.mod | 2 +- 6 files changed, 79 insertions(+), 3 deletions(-) create mode 100644 .chloggen/prometheus-remote-write-exporter-retry-429.yaml diff --git a/.chloggen/prometheus-remote-write-exporter-retry-429.yaml b/.chloggen/prometheus-remote-write-exporter-retry-429.yaml new file mode 100644 index 000000000000..115675c38dcb --- /dev/null +++ b/.chloggen/prometheus-remote-write-exporter-retry-429.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: prometheusremotewriteexporter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add `exporter.prometheusremotewritexporter.RetryOn429` feature gate to retry on http status code 429 response. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [ 31032 ] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: The feature gate is initially disabled by default. 
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: []
diff --git a/exporter/prometheusremotewriteexporter/README.md b/exporter/prometheusremotewriteexporter/README.md
index f48f3e9d00a8..33ae05e1604d 100644
--- a/exporter/prometheusremotewriteexporter/README.md
+++ b/exporter/prometheusremotewriteexporter/README.md
@@ -100,6 +100,13 @@ Several helper files are leveraged to provide additional capabilities automatica
 - [TLS and mTLS settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md)
 - [Retry and timeout settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md), note that the exporter doesn't support `sending_queue` but provides `remote_write_queue`.
 
+### Feature gates
+This exporter has a feature gate: `exporter.prometheusremotewritexporter.RetryOn429`.
+When this feature gate is enabled, the Prometheus remote write exporter retries requests that are rejected with HTTP status code 429, using the configured retry settings.
+The exporter currently does not honor the `Retry-After` HTTP header, if present, because the retry library in use does not support it.
+
+To enable this behavior, run the collector with the `exporter.prometheusremotewritexporter.RetryOn429` feature gate enabled, i.e. pass the additional parameter `--feature-gates=exporter.prometheusremotewritexporter.RetryOn429`.
+
 ## Metric names and labels normalization
 
 OpenTelemetry metric names and attributes are normalized to be compliant with Prometheus naming rules. [Details on this normalization process are described in the Prometheus translator module](../../pkg/translator/prometheus/).
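For context, a minimal collector configuration that exercises this retry path could look like the sketch below. This is not part of the patch: the endpoint value and the interval choices are illustrative assumptions, and the `retry_on_failure` block simply maps to the exporter's backoff settings used in `exporter.go`. The collector would then be started with `--feature-gates=exporter.prometheusremotewritexporter.RetryOn429`.

```yaml
# Illustrative sketch only (not part of this patch): a prometheusremotewrite
# exporter with retries enabled. With the RetryOn429 feature gate turned on,
# these retry settings also apply to HTTP 429 responses.
exporters:
  prometheusremotewrite:
    endpoint: "https://prometheus.example.com/api/v1/write"  # placeholder endpoint
    retry_on_failure:
      enabled: true
      initial_interval: 5s    # first back-off delay
      max_interval: 30s       # cap on the back-off delay
      max_elapsed_time: 300s  # give up after this much total retrying
```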
diff --git a/exporter/prometheusremotewriteexporter/exporter.go b/exporter/prometheusremotewriteexporter/exporter.go index 633dc727f3de..15118976624e 100644 --- a/exporter/prometheusremotewriteexporter/exporter.go +++ b/exporter/prometheusremotewriteexporter/exporter.go @@ -66,6 +66,7 @@ type prwExporter struct { clientSettings *confighttp.ClientConfig settings component.TelemetrySettings retrySettings configretry.BackOffConfig + retryOnHTTP429 bool wal *prweWAL exporterSettings prometheusremotewrite.Settings telemetry prwTelemetry @@ -124,6 +125,7 @@ func newPRWExporter(cfg *Config, set exporter.CreateSettings) (*prwExporter, err clientSettings: &cfg.ClientConfig, settings: set.TelemetrySettings, retrySettings: cfg.BackOffConfig, + retryOnHTTP429: retryOn429FeatureGate.IsEnabled(), exporterSettings: prometheusremotewrite.Settings{ Namespace: cfg.Namespace, ExternalLabels: sanitizedLabels, @@ -329,6 +331,13 @@ func (prwe *prwExporter) execute(ctx context.Context, writeReq *prompb.WriteRequ if resp.StatusCode >= 500 && resp.StatusCode < 600 { return rerr } + + // 429 errors are recoverable and the exporter should retry if RetryOnHTTP429 enabled + // Reference: https://github.com/prometheus/prometheus/pull/12677 + if prwe.retryOnHTTP429 && resp.StatusCode == 429 { + return rerr + } + return backoff.Permanent(consumererror.NewPermanent(rerr)) } diff --git a/exporter/prometheusremotewriteexporter/exporter_test.go b/exporter/prometheusremotewriteexporter/exporter_test.go index 6021c012ed37..c1dbaafe758e 100644 --- a/exporter/prometheusremotewriteexporter/exporter_test.go +++ b/exporter/prometheusremotewriteexporter/exporter_test.go @@ -1051,6 +1051,7 @@ func TestRetries(t *testing.T) { serverErrorCount int // number of times server should return error expectedAttempts int httpStatus int + RetryOnHTTP429 bool assertError assert.ErrorAssertionFunc assertErrorType assert.ErrorAssertionFunc ctx context.Context @@ -1060,15 +1061,37 @@ func TestRetries(t *testing.T) { 3, 4, http.StatusInternalServerError, + false, + assert.NoError, + assert.NoError, + context.Background(), + }, + { + "test 429 should retry", + 3, + 4, + http.StatusTooManyRequests, + true, assert.NoError, assert.NoError, context.Background(), }, + { + "test 429 should not retry", + 4, + 1, + http.StatusTooManyRequests, + false, + assert.Error, + assertPermanentConsumerError, + context.Background(), + }, { "test 4xx should not retry", 4, 1, http.StatusBadRequest, + false, assert.Error, assertPermanentConsumerError, context.Background(), @@ -1078,6 +1101,7 @@ func TestRetries(t *testing.T) { 4, 0, http.StatusInternalServerError, + false, assert.Error, assertPermanentConsumerError, canceledContext(), @@ -1103,8 +1127,9 @@ func TestRetries(t *testing.T) { // Create the prwExporter exporter := &prwExporter{ - endpointURL: endpointURL, - client: http.DefaultClient, + endpointURL: endpointURL, + client: http.DefaultClient, + retryOnHTTP429: tt.RetryOnHTTP429, retrySettings: configretry.BackOffConfig{ Enabled: true, }, diff --git a/exporter/prometheusremotewriteexporter/factory.go b/exporter/prometheusremotewriteexporter/factory.go index d0a0f8555513..b84fa7ce15af 100644 --- a/exporter/prometheusremotewriteexporter/factory.go +++ b/exporter/prometheusremotewriteexporter/factory.go @@ -14,11 +14,19 @@ import ( "go.opentelemetry.io/collector/config/configretry" "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/exporter/exporterhelper" + "go.opentelemetry.io/collector/featuregate" 
"github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/internal/metadata" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry" ) +var retryOn429FeatureGate = featuregate.GlobalRegistry().MustRegister( + "exporter.prometheusremotewritexporter.RetryOn429", + featuregate.StageAlpha, + featuregate.WithRegisterFromVersion("v0.101.0"), + featuregate.WithRegisterDescription("When enabled, the Prometheus remote write exporter will retry 429 http status code. Requires exporter.prometheusremotewritexporter.metrics.RetryOn429 to be enabled."), +) + // NewFactory creates a new Prometheus Remote Write exporter. func NewFactory() exporter.Factory { return exporter.NewFactory( diff --git a/exporter/prometheusremotewriteexporter/go.mod b/exporter/prometheusremotewriteexporter/go.mod index 42e0991115d3..aad1129fae9d 100644 --- a/exporter/prometheusremotewriteexporter/go.mod +++ b/exporter/prometheusremotewriteexporter/go.mod @@ -22,6 +22,7 @@ require ( go.opentelemetry.io/collector/confmap v0.100.0 go.opentelemetry.io/collector/consumer v0.100.0 go.opentelemetry.io/collector/exporter v0.100.0 + go.opentelemetry.io/collector/featuregate v1.7.0 go.opentelemetry.io/collector/pdata v1.7.0 go.opentelemetry.io/otel v1.26.0 go.opentelemetry.io/otel/metric v1.26.0 @@ -67,7 +68,6 @@ require ( go.opentelemetry.io/collector/config/internal v0.100.0 // indirect go.opentelemetry.io/collector/extension v0.100.0 // indirect go.opentelemetry.io/collector/extension/auth v0.100.0 // indirect - go.opentelemetry.io/collector/featuregate v1.7.0 // indirect go.opentelemetry.io/collector/receiver v0.100.0 // indirect go.opentelemetry.io/collector/semconv v0.100.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 // indirect From 82a828b798cd9074b9a3aec95c2115e825042799 Mon Sep 17 00:00:00 2001 From: Stefan Kurek Date: Fri, 10 May 2024 14:30:09 -0400 Subject: [PATCH 23/55] [chore] vcenterreceiver Adds Accidentally Removed Unit Test Configs/Results (#32987) **Description:** In this [PR](https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/32913), I accidentally removed all the enabled configs from the 2nd set of unit tests. This was incorrect as there are still 4 of them that are currently disabled by default. This is rectifying that by adding those back in. 
**Link to tracking Issue:** **Testing:** This change is for the unit tests only **Documentation:** NA --- receiver/vcenterreceiver/scraper_test.go | 4 + .../metrics/expected-all-enabled.yaml | 3816 ++++++++++++++--- 2 files changed, 3297 insertions(+), 523 deletions(-) diff --git a/receiver/vcenterreceiver/scraper_test.go b/receiver/vcenterreceiver/scraper_test.go index ef1fdd26c887..7736f77b21a1 100644 --- a/receiver/vcenterreceiver/scraper_test.go +++ b/receiver/vcenterreceiver/scraper_test.go @@ -39,6 +39,10 @@ func TestScrapeConfigsEnabled(t *testing.T) { defer mockServer.Close() optConfigs := metadata.DefaultMetricsBuilderConfig() + optConfigs.Metrics.VcenterHostNetworkPacketErrorRate.Enabled = true + optConfigs.Metrics.VcenterHostNetworkPacketRate.Enabled = true + optConfigs.Metrics.VcenterVMNetworkPacketRate.Enabled = true + optConfigs.Metrics.VcenterVMNetworkPacketDropRate.Enabled = true cfg := &Config{ MetricsBuilderConfig: optConfigs, diff --git a/receiver/vcenterreceiver/testdata/metrics/expected-all-enabled.yaml b/receiver/vcenterreceiver/testdata/metrics/expected-all-enabled.yaml index f52ae97074e3..13610089822e 100644 --- a/receiver/vcenterreceiver/testdata/metrics/expected-all-enabled.yaml +++ b/receiver/vcenterreceiver/testdata/metrics/expected-all-enabled.yaml @@ -1399,12 +1399,11 @@ resourceMetrics: startTimeUnixNano: "6000000" timeUnixNano: "5000000" unit: '{packets/sec}' - - description: The summation of packet errors on the host network. - name: vcenter.host.network.packet.errors - sum: - aggregationTemporality: 2 + - description: The rate of packet errors transmitted or received on the host network. + name: vcenter.host.network.packet.error.rate + gauge: dataPoints: - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1414,7 +1413,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1424,7 +1423,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1434,7 +1433,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1444,7 +1443,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1454,7 +1453,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1464,7 +1463,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1474,7 +1473,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1484,7 +1483,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1494,7 +1493,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1504,7 +1503,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asDouble: "0" attributes: - 
key: direction value: @@ -1514,7 +1513,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1524,7 +1523,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1534,7 +1533,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1544,7 +1543,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1554,7 +1553,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1564,7 +1563,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1574,7 +1573,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1584,7 +1583,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1594,7 +1593,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1604,7 +1603,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1614,7 +1613,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1624,7 +1623,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1634,7 +1633,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1644,7 +1643,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1654,7 +1653,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1664,7 +1663,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1674,7 +1673,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1684,7 +1683,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1694,7 +1693,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1704,7 +1703,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "5000000" - 
- asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1714,7 +1713,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1724,7 +1723,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1734,7 +1733,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1744,7 +1743,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1754,7 +1753,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1764,7 +1763,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1774,7 +1773,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1784,7 +1783,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1794,7 +1793,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1804,7 +1803,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1814,7 +1813,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1824,7 +1823,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1834,7 +1833,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1844,7 +1843,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1854,7 +1853,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1864,7 +1863,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1874,7 +1873,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1884,7 +1883,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1894,7 +1893,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -1904,13 +1903,13 @@ resourceMetrics: 
stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - unit: '{errors}' - - description: The amount of data that was transmitted or received over the network by the host. - name: vcenter.host.network.throughput + unit: '{errors/sec}' + - description: The summation of packet errors on the host network. + name: vcenter.host.network.packet.errors sum: aggregationTemporality: 2 dataPoints: - - asInt: "928" + - asInt: "0" attributes: - key: direction value: @@ -1920,7 +1919,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "1120" + - asInt: "0" attributes: - key: direction value: @@ -1930,7 +1929,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "1646" + - asInt: "0" attributes: - key: direction value: @@ -1940,7 +1939,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "1291" + - asInt: "0" attributes: - key: direction value: @@ -1950,7 +1949,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "1058" + - asInt: "0" attributes: - key: direction value: @@ -1960,7 +1959,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "570" + - asInt: "0" attributes: - key: direction value: @@ -1970,7 +1969,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "768" + - asInt: "0" attributes: - key: direction value: @@ -1980,7 +1979,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "1269" + - asInt: "0" attributes: - key: direction value: @@ -1990,7 +1989,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "927" + - asInt: "0" attributes: - key: direction value: @@ -2000,7 +1999,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "681" + - asInt: "0" attributes: - key: direction value: @@ -2110,7 +2109,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "357" + - asInt: "0" attributes: - key: direction value: @@ -2120,7 +2119,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "351" + - asInt: "0" attributes: - key: direction value: @@ -2130,7 +2129,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "376" + - asInt: "0" attributes: - key: direction value: @@ -2140,7 +2139,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "363" + - asInt: "0" attributes: - key: direction value: @@ -2150,7 +2149,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "376" + - asInt: "0" attributes: - key: direction value: @@ -2160,7 +2159,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "3475" + - asInt: "0" attributes: - key: direction value: @@ -2170,7 +2169,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "2959" + - asInt: "0" attributes: - key: direction value: @@ -2180,7 +2179,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "4924" + - asInt: "0" attributes: - key: direction value: @@ -2190,7 +2189,7 @@ 
resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "4364" + - asInt: "0" attributes: - key: direction value: @@ -2200,7 +2199,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "3058" + - asInt: "0" attributes: - key: direction value: @@ -2210,7 +2209,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "3064" + - asInt: "0" attributes: - key: direction value: @@ -2220,7 +2219,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "2537" + - asInt: "0" attributes: - key: direction value: @@ -2230,7 +2229,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "4373" + - asInt: "0" attributes: - key: direction value: @@ -2240,7 +2239,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "3746" + - asInt: "0" attributes: - key: direction value: @@ -2250,7 +2249,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "2569" + - asInt: "0" attributes: - key: direction value: @@ -2360,7 +2359,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "411" + - asInt: "0" attributes: - key: direction value: @@ -2370,7 +2369,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "422" + - asInt: "0" attributes: - key: direction value: @@ -2380,7 +2379,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "551" + - asInt: "0" attributes: - key: direction value: @@ -2390,7 +2389,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "617" + - asInt: "0" attributes: - key: direction value: @@ -2400,7 +2399,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "488" + - asInt: "0" attributes: - key: direction value: @@ -2410,942 +2409,2964 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - unit: '{KiBy/s}' - - description: The sum of the data transmitted and received for all the NIC instances of the host. - name: vcenter.host.network.usage - sum: - aggregationTemporality: 2 + unit: '{errors}' + - description: The rate of packets transmitted or received across each physical NIC (network interface controller) instance on the host. 
+ name: vcenter.host.network.packet.rate + gauge: dataPoints: - - asInt: "4404" + - asDouble: "2782.35" attributes: + - key: direction + value: + stringValue: received - key: object value: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "4079" + - asDouble: "2868.8" attributes: + - key: direction + value: + stringValue: received - key: object value: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "6570" + - asDouble: "3207.8" attributes: + - key: direction + value: + stringValue: received - key: object value: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "5655" + - asDouble: "2940.7" attributes: + - key: direction + value: + stringValue: received - key: object value: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "4117" + - asDouble: "2869.5" attributes: + - key: direction + value: + stringValue: received - key: object value: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "3634" + - asDouble: "665.8" attributes: + - key: direction + value: + stringValue: received - key: object value: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "3305" + - asDouble: "723.65" attributes: + - key: direction + value: + stringValue: received - key: object value: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "5642" + - asDouble: "983.1" attributes: + - key: direction + value: + stringValue: received - key: object value: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "4674" + - asDouble: "773.9" attributes: + - key: direction + value: + stringValue: received - key: object value: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "3251" + - asDouble: "722.9" attributes: + - key: direction + value: + stringValue: received - key: object value: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asDouble: "5.8" attributes: + - key: direction + value: + stringValue: received - key: object value: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "5.7" attributes: + - key: direction + value: + stringValue: received - key: object value: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asDouble: "5.65" attributes: + - key: direction + value: + stringValue: received - key: object value: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asDouble: "5.6" attributes: + - key: direction + value: + stringValue: received - key: object value: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asDouble: "6" attributes: + - key: direction + value: + stringValue: received - key: object value: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asDouble: "5.25" attributes: + - key: direction + value: + stringValue: received - key: object value: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "5.15" attributes: + - key: direction + value: + stringValue: received - key: object value: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asDouble: "5.2" attributes: + - key: direction + value: + stringValue: received - key: object value: stringValue: vmnic2 startTimeUnixNano: "6000000" 
timeUnixNano: "3000000" - - asInt: "0" + - asDouble: "5.1" attributes: + - key: direction + value: + stringValue: received - key: object value: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asDouble: "5.45" attributes: + - key: direction + value: + stringValue: received - key: object value: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "769" + - asDouble: "2105.5" attributes: + - key: direction + value: + stringValue: received - key: object value: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "773" + - asDouble: "2134.3" attributes: + - key: direction + value: + stringValue: received - key: object value: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "927" + - asDouble: "2213.85" attributes: + - key: direction + value: + stringValue: received - key: object value: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "980" + - asDouble: "2156.1" attributes: + - key: direction + value: + stringValue: received - key: object value: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "864" + - asDouble: "2135.15" attributes: + - key: direction + value: + stringValue: received - key: object value: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - unit: '{KiBy/s}' - scope: - name: otelcol/vcenterreceiver - version: latest - - resource: - attributes: - - key: vcenter.datacenter.name - value: - stringValue: Datacenter - - key: vcenter.host.name - value: - stringValue: esxi-111.europe-southeast1.gve.goog - scopeMetrics: - - metrics: - - description: The amount of CPU used by the host. - name: vcenter.host.cpu.usage - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "6107" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: MHz - - description: The CPU utilization of the host system. 
- gauge: - dataPoints: - - asDouble: 6.542186227878476 - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" + - asDouble: "2599.6" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asDouble: "2735.6" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asDouble: "2972.45" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asDouble: "2730.2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asDouble: "2723.2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asDouble: "559.1" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asDouble: "650.45" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asDouble: "824.45" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asDouble: "619.9" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asDouble: "649.2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asDouble: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asDouble: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asDouble: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asDouble: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asDouble: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asDouble: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asDouble: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asDouble: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "6000000" + 
timeUnixNano: "3000000" + - asDouble: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asDouble: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asDouble: "2040.5" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asDouble: "2085.15" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asDouble: "2148" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asDouble: "2110.3" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asDouble: "2074" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + unit: '{packets/sec}' + - description: The amount of data that was transmitted or received over the network by the host. + name: vcenter.host.network.throughput + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "928" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "1120" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "1646" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "1291" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "1058" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asInt: "570" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "768" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "1269" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "927" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "681" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: received + - key: object + 
value: + stringValue: vmnic1 + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asInt: "357" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "351" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "376" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "363" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "376" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asInt: "3475" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "2959" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "4924" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "4364" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "3058" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + 
stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asInt: "3064" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "2537" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "4373" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "3746" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "2569" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asInt: "411" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "422" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "551" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "617" + attributes: + - key: direction + value: + stringValue: 
transmitted + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "488" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + unit: '{KiBy/s}' + - description: The sum of the data transmitted and received for all the NIC instances of the host. + name: vcenter.host.network.usage + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "4404" + attributes: + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "4079" + attributes: + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "6570" + attributes: + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "5655" + attributes: + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "4117" + attributes: + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asInt: "3634" + attributes: + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "3305" + attributes: + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "5642" + attributes: + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "4674" + attributes: + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "3251" + attributes: + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asInt: "0" + attributes: + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "0" + attributes: + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "0" + attributes: + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asInt: "0" + attributes: + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "0" + attributes: + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "0" + attributes: + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asInt: "769" + attributes: + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "773" + attributes: + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "927" + attributes: + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: 
"6000000" + timeUnixNano: "3000000" + - asInt: "980" + attributes: + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "864" + attributes: + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + unit: '{KiBy/s}' + scope: + name: otelcol/vcenterreceiver + version: latest + - resource: + attributes: + - key: vcenter.datacenter.name + value: + stringValue: Datacenter + - key: vcenter.host.name + value: + stringValue: esxi-111.europe-southeast1.gve.goog + scopeMetrics: + - metrics: + - description: The amount of CPU used by the host. + name: vcenter.host.cpu.usage + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "6107" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: MHz + - description: The CPU utilization of the host system. + gauge: + dataPoints: + - asDouble: 6.542186227878476 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: vcenter.host.cpu.utilization unit: '%' - description: The latency of operations to the host system's disk. gauge: dataPoints: - - asInt: "781" + - asInt: "781" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "789" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "645" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "781" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "782" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asInt: "781" + attributes: + - key: direction + value: + stringValue: write + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "789" + attributes: + - key: direction + value: + stringValue: write + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "645" + attributes: + - key: direction + value: + stringValue: write + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "781" + attributes: + - key: direction + value: + stringValue: write + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "782" + attributes: + - key: direction + value: + stringValue: write + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + name: vcenter.host.disk.latency.avg + unit: ms + - description: Highest latency value across all disks used by the host. 
+ gauge: + dataPoints: + - asInt: "899" + attributes: + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "899" + attributes: + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "905" + attributes: + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "1000" + attributes: + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "1002" + attributes: + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + name: vcenter.host.disk.latency.max + unit: ms + - description: Average number of kilobytes read from or written to the disk each second. + name: vcenter.host.disk.throughput + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "28" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "45" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "88" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "92" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "31" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asInt: "4" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.ATA_____DELLBOSS_VD_____________________________983baa25884a001000000000 + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "25" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.ATA_____DELLBOSS_VD_____________________________983baa25884a001000000000 + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "76" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.ATA_____DELLBOSS_VD_____________________________983baa25884a001000000000 + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "63" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.ATA_____DELLBOSS_VD_____________________________983baa25884a001000000000 + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.ATA_____DELLBOSS_VD_____________________________983baa25884a001000000000 + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asInt: "6" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E10E3E4D25C + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "6" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E10E3E4D25C + startTimeUnixNano: "6000000" + timeUnixNano: 
"2000000" + - asInt: "4" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E10E3E4D25C + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "8" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E10E3E4D25C + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "19" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E10E3E4D25C + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asInt: "5" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E266CE4D25C + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "10" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E266CE4D25C + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "5" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E266CE4D25C + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "7" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E266CE4D25C + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "6" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E266CE4D25C + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asInt: "4" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____362E000121382500 + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____362E000121382500 + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "1" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____362E000121382500 + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "2" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____362E000121382500 + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____362E000121382500 + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asInt: "7" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3C2E000121382500 + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: 
t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3C2E000121382500 + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3C2E000121382500 + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "4" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3C2E000121382500 + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "2" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3C2E000121382500 + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3D2E000121382500 + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3D2E000121382500 + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3D2E000121382500 + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "2" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3D2E000121382500 + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "1" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3D2E000121382500 + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____482E000121382500 + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____482E000121382500 + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____482E000121382500 + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "1" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____482E000121382500 + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____482E000121382500 + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____B32D000121382500 + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "2" + 
attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____B32D000121382500 + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____B32D000121382500 + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "2" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____B32D000121382500 + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____B32D000121382500 + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____BD2D000121382500 + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____BD2D000121382500 + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____BD2D000121382500 + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____BD2D000121382500 + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: read + - key: object + value: + stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____BD2D000121382500 + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asInt: "781" + attributes: + - key: direction + value: + stringValue: write + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "789" + attributes: + - key: direction + value: + stringValue: write + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "645" + attributes: + - key: direction + value: + stringValue: write + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "781" + attributes: + - key: direction + value: + stringValue: write + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "782" + attributes: + - key: direction + value: + stringValue: write + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + unit: '{KiBy/s}' + - description: The amount of memory the host system is using. + name: vcenter.host.memory.usage + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "140833" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: MiBy + - description: The percentage of the host system's memory capacity that is being utilized. 
+ gauge: + dataPoints: + - asDouble: 17.948557824655133 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: vcenter.host.memory.utilization + unit: '%' + - description: The number of packets transmitted and received, as measured over the most recent 20s interval. + name: vcenter.host.network.packet.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "55647" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "57376" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "64156" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "58814" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "57390" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asInt: "13316" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "14473" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "19662" attributes: - key: direction value: - stringValue: read + stringValue: received - key: object value: - stringValue: "4000" + stringValue: vmnic0 + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "15478" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "14458" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asInt: "116" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "789" + - asInt: "114" attributes: - key: direction value: - stringValue: read + stringValue: received - key: object value: - stringValue: "4000" + stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "645" + - asInt: "113" attributes: - key: direction value: - stringValue: read + stringValue: received - key: object value: - stringValue: "4000" + stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "781" + - asInt: "112" attributes: - key: direction value: - stringValue: read + stringValue: received - key: object value: - stringValue: "4000" + stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "782" + - asInt: "120" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asInt: "105" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: 
"103" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "104" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "102" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "109" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asInt: "42110" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "42686" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "44277" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "43122" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "42703" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asInt: "51992" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "54712" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "59449" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "54604" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "54464" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "" + startTimeUnixNano: "6000000" + timeUnixNano: "5000000" + - asInt: "11182" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "6000000" + timeUnixNano: "1000000" + - asInt: "13009" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "6000000" + timeUnixNano: "2000000" + - asInt: "16489" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "6000000" + timeUnixNano: "3000000" + - asInt: "12398" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "6000000" + timeUnixNano: "4000000" + - asInt: "12984" attributes: - key: direction value: - stringValue: read + stringValue: transmitted - key: object value: - stringValue: "4000" + stringValue: 
vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "781" + - asInt: "0" attributes: - key: direction value: - stringValue: write + stringValue: transmitted - key: object value: - stringValue: "4000" + stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "789" + - asInt: "0" attributes: - key: direction value: - stringValue: write + stringValue: transmitted - key: object value: - stringValue: "4000" + stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "645" + - asInt: "0" attributes: - key: direction value: - stringValue: write + stringValue: transmitted - key: object value: - stringValue: "4000" + stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "781" + - asInt: "0" attributes: - key: direction value: - stringValue: write + stringValue: transmitted - key: object value: - stringValue: "4000" + stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "782" + - asInt: "0" attributes: - key: direction value: - stringValue: write + stringValue: transmitted - key: object value: - stringValue: "4000" + stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - name: vcenter.host.disk.latency.avg - unit: ms - - description: Highest latency value across all disks used by the host. - gauge: - dataPoints: - - asInt: "899" + - asInt: "0" attributes: + - key: direction + value: + stringValue: transmitted - key: object value: - stringValue: "4000" + stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "899" + - asInt: "0" attributes: + - key: direction + value: + stringValue: transmitted - key: object value: - stringValue: "4000" + stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "905" + - asInt: "0" attributes: + - key: direction + value: + stringValue: transmitted - key: object value: - stringValue: "4000" + stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "1000" + - asInt: "0" attributes: + - key: direction + value: + stringValue: transmitted - key: object value: - stringValue: "4000" + stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "1002" + - asInt: "0" attributes: + - key: direction + value: + stringValue: transmitted - key: object value: - stringValue: "4000" + stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - name: vcenter.host.disk.latency.max - unit: ms - - description: Average number of kilobytes read from or written to the disk each second. 
- name: vcenter.host.disk.throughput - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "28" + - asInt: "40810" attributes: - key: direction value: - stringValue: read + stringValue: transmitted - key: object value: - stringValue: "" + stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "45" + - asInt: "41703" attributes: - key: direction value: - stringValue: read + stringValue: transmitted - key: object value: - stringValue: "" + stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "88" + - asInt: "42960" attributes: - key: direction value: - stringValue: read + stringValue: transmitted - key: object value: - stringValue: "" + stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "92" + - asInt: "42206" attributes: - key: direction value: - stringValue: read + stringValue: transmitted - key: object value: - stringValue: "" + stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "31" + - asInt: "41480" attributes: - key: direction value: - stringValue: read + stringValue: transmitted - key: object value: - stringValue: "" + stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "4" + unit: '{packets/sec}' + - description: The rate of packet errors transmitted or received on the host network. + name: vcenter.host.network.packet.error.rate + gauge: + dataPoints: + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: received - key: object value: - stringValue: t10.ATA_____DELLBOSS_VD_____________________________983baa25884a001000000000 + stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "25" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: received - key: object value: - stringValue: t10.ATA_____DELLBOSS_VD_____________________________983baa25884a001000000000 + stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "76" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: received - key: object value: - stringValue: t10.ATA_____DELLBOSS_VD_____________________________983baa25884a001000000000 + stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "63" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: received - key: object value: - stringValue: t10.ATA_____DELLBOSS_VD_____________________________983baa25884a001000000000 + stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: received - key: object value: - stringValue: t10.ATA_____DELLBOSS_VD_____________________________983baa25884a001000000000 + stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "6" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: received - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E10E3E4D25C + stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "6" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: received - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E10E3E4D25C + stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "4" + - asDouble: "0" attributes: - key: 
direction value: - stringValue: read + stringValue: received - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E10E3E4D25C + stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "8" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: received - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E10E3E4D25C + stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "19" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: received - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E10E3E4D25C + stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "5" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: received - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E266CE4D25C + stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "10" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: received - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E266CE4D25C + stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "5" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: received - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E266CE4D25C + stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "7" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: received - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E266CE4D25C + stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "6" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: received - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_NVMe_P4610_1.6TB_SFF_00010E266CE4D25C + stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "4" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: received - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____362E000121382500 + stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: received - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____362E000121382500 + stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "1" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: received - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____362E000121382500 + stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "2" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: received - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____362E000121382500 + stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: - stringValue: read 
+ stringValue: received - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____362E000121382500 + stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "7" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: received - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3C2E000121382500 + stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: received - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3C2E000121382500 + stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: received - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3C2E000121382500 + stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "4" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: received - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3C2E000121382500 + stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "2" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: received - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3C2E000121382500 + stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: transmitted - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3D2E000121382500 + stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: transmitted - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3D2E000121382500 + stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: transmitted - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3D2E000121382500 + stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "2" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: transmitted - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3D2E000121382500 + stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "1" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: transmitted - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____3D2E000121382500 + stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: transmitted - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____482E000121382500 + stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: transmitted - key: object 
value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____482E000121382500 + stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: transmitted - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____482E000121382500 + stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "1" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: transmitted - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____482E000121382500 + stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: transmitted - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____482E000121382500 + stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: transmitted - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____B32D000121382500 + stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "2" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: transmitted - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____B32D000121382500 + stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: transmitted - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____B32D000121382500 + stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "2" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: transmitted - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____B32D000121382500 + stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: transmitted - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____B32D000121382500 + stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: transmitted - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____BD2D000121382500 + stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: transmitted - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____BD2D000121382500 + stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: transmitted - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____BD2D000121382500 + stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: transmitted - key: object 
value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____BD2D000121382500 + stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: - stringValue: read + stringValue: transmitted - key: object value: - stringValue: t10.NVMe____Dell_Express_Flash_PM1725b_3.2TB_SFF____BD2D000121382500 + stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "781" + - asDouble: "0" attributes: - key: direction value: - stringValue: write + stringValue: transmitted - key: object value: - stringValue: "4000" + stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "789" + - asDouble: "0" attributes: - key: direction value: - stringValue: write + stringValue: transmitted - key: object value: - stringValue: "4000" + stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "645" + - asDouble: "0" attributes: - key: direction value: - stringValue: write + stringValue: transmitted - key: object value: - stringValue: "4000" + stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "781" + - asDouble: "0" attributes: - key: direction value: - stringValue: write + stringValue: transmitted - key: object value: - stringValue: "4000" + stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "782" + - asDouble: "0" attributes: - key: direction value: - stringValue: write + stringValue: transmitted - key: object value: - stringValue: "4000" + stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - unit: '{KiBy/s}' - - description: The amount of memory the host system is using. - name: vcenter.host.memory.usage - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "140833" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: MiBy - - description: The percentage of the host system's memory capacity that is being utilized. - gauge: - dataPoints: - - asDouble: 17.948557824655133 - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: vcenter.host.memory.utilization - unit: '%' - - description: The number of packets transmitted and received, as measured over the most recent 20s interval. - name: vcenter.host.network.packet.count + unit: '{errors/sec}' + - description: The summation of packet errors on the host network. 
+ name: vcenter.host.network.packet.errors sum: aggregationTemporality: 2 dataPoints: - - asInt: "55647" + - asInt: "0" attributes: - key: direction value: @@ -3355,7 +5376,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "57376" + - asInt: "0" attributes: - key: direction value: @@ -3365,7 +5386,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "64156" + - asInt: "0" attributes: - key: direction value: @@ -3375,7 +5396,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "58814" + - asInt: "0" attributes: - key: direction value: @@ -3385,7 +5406,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "57390" + - asInt: "0" attributes: - key: direction value: @@ -3395,7 +5416,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "13316" + - asInt: "0" attributes: - key: direction value: @@ -3405,7 +5426,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "14473" + - asInt: "0" attributes: - key: direction value: @@ -3415,7 +5436,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "19662" + - asInt: "0" attributes: - key: direction value: @@ -3425,7 +5446,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "15478" + - asInt: "0" attributes: - key: direction value: @@ -3435,7 +5456,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "14458" + - asInt: "0" attributes: - key: direction value: @@ -3445,7 +5466,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "116" + - asInt: "0" attributes: - key: direction value: @@ -3455,7 +5476,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "114" + - asInt: "0" attributes: - key: direction value: @@ -3465,7 +5486,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "113" + - asInt: "0" attributes: - key: direction value: @@ -3475,7 +5496,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "112" + - asInt: "0" attributes: - key: direction value: @@ -3485,7 +5506,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "120" + - asInt: "0" attributes: - key: direction value: @@ -3495,7 +5516,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "105" + - asInt: "0" attributes: - key: direction value: @@ -3505,7 +5526,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "103" + - asInt: "0" attributes: - key: direction value: @@ -3515,7 +5536,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "104" + - asInt: "0" attributes: - key: direction value: @@ -3525,7 +5546,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "102" + - asInt: "0" attributes: - key: direction value: @@ -3535,7 +5556,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "109" + - asInt: "0" attributes: - 
key: direction value: @@ -3545,7 +5566,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "42110" + - asInt: "0" attributes: - key: direction value: @@ -3555,7 +5576,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "42686" + - asInt: "0" attributes: - key: direction value: @@ -3565,7 +5586,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "44277" + - asInt: "0" attributes: - key: direction value: @@ -3575,7 +5596,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "43122" + - asInt: "0" attributes: - key: direction value: @@ -3585,7 +5606,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "42703" + - asInt: "0" attributes: - key: direction value: @@ -3595,7 +5616,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "51992" + - asInt: "0" attributes: - key: direction value: @@ -3605,7 +5626,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "54712" + - asInt: "0" attributes: - key: direction value: @@ -3615,7 +5636,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "59449" + - asInt: "0" attributes: - key: direction value: @@ -3625,7 +5646,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "54604" + - asInt: "0" attributes: - key: direction value: @@ -3635,7 +5656,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "54464" + - asInt: "0" attributes: - key: direction value: @@ -3645,7 +5666,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "11182" + - asInt: "0" attributes: - key: direction value: @@ -3655,7 +5676,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "13009" + - asInt: "0" attributes: - key: direction value: @@ -3665,7 +5686,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "16489" + - asInt: "0" attributes: - key: direction value: @@ -3675,7 +5696,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "12398" + - asInt: "0" attributes: - key: direction value: @@ -3685,7 +5706,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "12984" + - asInt: "0" attributes: - key: direction value: @@ -3795,7 +5816,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "40810" + - asInt: "0" attributes: - key: direction value: @@ -3805,7 +5826,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "41703" + - asInt: "0" attributes: - key: direction value: @@ -3815,7 +5836,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "42960" + - asInt: "0" attributes: - key: direction value: @@ -3825,7 +5846,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "42206" + - asInt: "0" attributes: - key: direction value: @@ -3835,7 +5856,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" 
timeUnixNano: "4000000" - - asInt: "41480" + - asInt: "0" attributes: - key: direction value: @@ -3845,13 +5866,12 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - unit: '{packets/sec}' - - description: The summation of packet errors on the host network. - name: vcenter.host.network.packet.errors - sum: - aggregationTemporality: 2 + unit: '{errors}' + - description: The rate of packets transmitted or received across each physical NIC (network interface controller) instance on the host. + name: vcenter.host.network.packet.rate + gauge: dataPoints: - - asInt: "0" + - asDouble: "2782.35" attributes: - key: direction value: @@ -3861,7 +5881,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "2868.8" attributes: - key: direction value: @@ -3871,7 +5891,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asDouble: "3207.8" attributes: - key: direction value: @@ -3881,7 +5901,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asDouble: "2940.7" attributes: - key: direction value: @@ -3891,7 +5911,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asDouble: "2869.5" attributes: - key: direction value: @@ -3901,7 +5921,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asDouble: "665.8" attributes: - key: direction value: @@ -3911,7 +5931,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "723.65" attributes: - key: direction value: @@ -3921,7 +5941,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asDouble: "983.1" attributes: - key: direction value: @@ -3931,7 +5951,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asDouble: "773.9" attributes: - key: direction value: @@ -3941,7 +5961,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asDouble: "722.9" attributes: - key: direction value: @@ -3951,7 +5971,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asDouble: "5.8" attributes: - key: direction value: @@ -3961,7 +5981,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "5.7" attributes: - key: direction value: @@ -3971,7 +5991,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asDouble: "5.65" attributes: - key: direction value: @@ -3981,7 +6001,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asDouble: "5.6" attributes: - key: direction value: @@ -3991,7 +6011,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asDouble: "6" attributes: - key: direction value: @@ -4001,7 +6021,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asDouble: "5.25" attributes: - key: direction value: @@ -4011,7 +6031,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - 
asDouble: "5.15" attributes: - key: direction value: @@ -4021,7 +6041,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asDouble: "5.2" attributes: - key: direction value: @@ -4031,7 +6051,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asDouble: "5.1" attributes: - key: direction value: @@ -4041,7 +6061,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asDouble: "5.45" attributes: - key: direction value: @@ -4051,7 +6071,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asDouble: "2105.5" attributes: - key: direction value: @@ -4061,7 +6081,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "2134.3" attributes: - key: direction value: @@ -4071,7 +6091,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asDouble: "2213.85" attributes: - key: direction value: @@ -4081,7 +6101,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asDouble: "2156.1" attributes: - key: direction value: @@ -4091,7 +6111,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asDouble: "2135.15" attributes: - key: direction value: @@ -4101,7 +6121,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asDouble: "2599.6" attributes: - key: direction value: @@ -4111,7 +6131,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "2735.6" attributes: - key: direction value: @@ -4121,7 +6141,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asDouble: "2972.45" attributes: - key: direction value: @@ -4131,7 +6151,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asDouble: "2730.2" attributes: - key: direction value: @@ -4141,7 +6161,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asDouble: "2723.2" attributes: - key: direction value: @@ -4151,7 +6171,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asDouble: "559.1" attributes: - key: direction value: @@ -4161,7 +6181,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "650.45" attributes: - key: direction value: @@ -4171,7 +6191,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asDouble: "824.45" attributes: - key: direction value: @@ -4181,7 +6201,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asDouble: "619.9" attributes: - key: direction value: @@ -4191,7 +6211,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asDouble: "649.2" attributes: - key: direction value: @@ -4201,7 +6221,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: 
@@ -4211,7 +6231,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -4221,7 +6241,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -4231,7 +6251,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -4241,7 +6261,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -4251,7 +6271,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -4261,7 +6281,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -4271,7 +6291,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -4281,7 +6301,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -4291,7 +6311,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -4301,7 +6321,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - - asInt: "0" + - asDouble: "2040.5" attributes: - key: direction value: @@ -4311,7 +6331,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "2085.15" attributes: - key: direction value: @@ -4321,7 +6341,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "2000000" - - asInt: "0" + - asDouble: "2148" attributes: - key: direction value: @@ -4331,7 +6351,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "3000000" - - asInt: "0" + - asDouble: "2110.3" attributes: - key: direction value: @@ -4341,7 +6361,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "4000000" - - asInt: "0" + - asDouble: "2074" attributes: - key: direction value: @@ -4351,7 +6371,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "6000000" timeUnixNano: "5000000" - unit: '{errors}' + unit: '{packets/sec}' - description: The amount of data that was transmitted or received over the network by the host. name: vcenter.host.network.throughput sum: @@ -5349,17 +7369,267 @@ resourceMetrics: - description: The memory utilization of the VM. gauge: dataPoints: - - asDouble: 0.994873046875 - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: vcenter.vm.memory.utilization - unit: '%' - - description: The amount of packets that was received or transmitted over the instance's network. - name: vcenter.vm.network.packet.count - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "0" + - asDouble: 0.994873046875 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: vcenter.vm.memory.utilization + unit: '%' + - description: The amount of packets that was received or transmitted over the instance's network. 
+ name: vcenter.vm.network.packet.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + unit: '{packets/sec}' + - description: The rate of transmitted or received packets dropped by each vNIC (virtual network interface controller) on the virtual machine. 
+ name: vcenter.vm.network.packet.drop.rate + gauge: + dataPoints: + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + unit: '{packets/sec}' + - description: The rate of packets transmitted or received by each vNIC (virtual network interface controller) on the virtual machine. 
+ name: vcenter.vm.network.packet.rate + gauge: + dataPoints: + - asDouble: "0" attributes: - key: direction value: @@ -5369,7 +7639,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "2000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -5379,7 +7649,7 @@ resourceMetrics: stringValue: "4000" startTimeUnixNano: "2000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -5389,7 +7659,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "2000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -5399,7 +7669,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "2000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -5409,7 +7679,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "2000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -5419,7 +7689,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "2000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -5429,7 +7699,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "2000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -5439,7 +7709,7 @@ resourceMetrics: stringValue: "4000" startTimeUnixNano: "2000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -5449,7 +7719,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "2000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -5459,7 +7729,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "2000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -5469,7 +7739,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "2000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -5896,17 +8166,267 @@ resourceMetrics: - description: The memory utilization of the VM. gauge: dataPoints: - - asDouble: 0.994873046875 - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: vcenter.vm.memory.utilization - unit: '%' - - description: The amount of packets that was received or transmitted over the instance's network. - name: vcenter.vm.network.packet.count - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "0" + - asDouble: 0.994873046875 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: vcenter.vm.memory.utilization + unit: '%' + - description: The amount of packets that was received or transmitted over the instance's network. 
+ name: vcenter.vm.network.packet.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asInt: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + unit: '{packets/sec}' + - description: The rate of transmitted or received packets dropped by each vNIC (virtual network interface controller) on the virtual machine. 
+ name: vcenter.vm.network.packet.drop.rate + gauge: + dataPoints: + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + unit: '{packets/sec}' + - description: The rate of packets transmitted or received by each vNIC (virtual network interface controller) on the virtual machine. 
+ name: vcenter.vm.network.packet.rate + gauge: + dataPoints: + - asDouble: "0" attributes: - key: direction value: @@ -5916,7 +8436,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "2000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -5926,7 +8446,7 @@ resourceMetrics: stringValue: "4000" startTimeUnixNano: "2000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -5936,7 +8456,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "2000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -5946,7 +8466,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "2000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -5956,7 +8476,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "2000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -5966,7 +8486,7 @@ resourceMetrics: stringValue: vmnic3 startTimeUnixNano: "2000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -5976,7 +8496,7 @@ resourceMetrics: stringValue: "" startTimeUnixNano: "2000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -5986,7 +8506,7 @@ resourceMetrics: stringValue: "4000" startTimeUnixNano: "2000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -5996,7 +8516,7 @@ resourceMetrics: stringValue: vmnic0 startTimeUnixNano: "2000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -6006,7 +8526,7 @@ resourceMetrics: stringValue: vmnic1 startTimeUnixNano: "2000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -6016,7 +8536,7 @@ resourceMetrics: stringValue: vmnic2 startTimeUnixNano: "2000000" timeUnixNano: "1000000" - - asInt: "0" + - asDouble: "0" attributes: - key: direction value: @@ -6529,6 +9049,256 @@ resourceMetrics: startTimeUnixNano: "2000000" timeUnixNano: "1000000" unit: '{packets/sec}' + - description: The rate of transmitted or received packets dropped by each vNIC (virtual network interface controller) on the virtual machine. 
+ name: vcenter.vm.network.packet.drop.rate + gauge: + dataPoints: + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "1" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + unit: '{packets/sec}' + - description: The rate of packets transmitted or received by each vNIC (virtual network interface controller) on the virtual machine. 
+ name: vcenter.vm.network.packet.rate + gauge: + dataPoints: + - asDouble: "0" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "0" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "0" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "0" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "0" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "0" + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "0" + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + unit: '{packets/sec}' - description: The amount of data that was transmitted or received over the network of the virtual machine. name: vcenter.vm.network.throughput sum: From c5485bf4ddef9c0acce85bff1fed60f669aabf6a Mon Sep 17 00:00:00 2001 From: Joshua MacDonald Date: Mon, 13 May 2024 00:47:46 -0700 Subject: [PATCH 24/55] OpenTelemetry Protocol with Apache Arrow Exporter component (#31996) **Description:** This is the same code as OTel-Arrow at https://github.com/open-telemetry/otel-arrow/releases/tag/v0.23.0 (plus [backported lint and test fixes](https://github.com/open-telemetry/otel-arrow/commit/0910113d46454c80881db840e21f25485dce2499)). Only import statements change here, to match the host repository. **Link to tracking Issue:** #26491 **Testing:** Test coverage is approximately 90%. **Documentation:** I double-checked and the existing README had only a few updates needed. 
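For orientation, a minimal collector configuration using the new component could look like the sketch below. It assumes the exporter registers under the `otelarrow` type (the README's receiver snippet uses the same name) and uses only settings that appear in the README and `config.go` changes that follow; the backend address and pipeline wiring are placeholders, not part of this change.

```yaml
exporters:
  otelarrow:
    # Required: host:port the exporter sends data to (placeholder address).
    endpoint: backend.example.com:4317
    arrow:
      # Number of concurrent OTel-Arrow streams; the factory default is runtime.NumCPU().
      num_streams: 2

service:
  pipelines:
    traces:
      receivers: [otlp]
      exporters: [otelarrow]
```

Per the configuration comments below, if the Arrow service is unavailable the exporter falls back to standard OTLP unless `arrow.disable_downgrade` is set, in which case it retries and/or fails instead.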
--- .chloggen/otelarrowexporter.yaml | 27 + exporter/otelarrowexporter/README.md | 15 +- exporter/otelarrowexporter/config.go | 36 +- exporter/otelarrowexporter/config_test.go | 28 +- exporter/otelarrowexporter/doc.go | 4 - exporter/otelarrowexporter/factory.go | 46 +- exporter/otelarrowexporter/factory_test.go | 13 +- .../generated_component_test.go | 132 ++ .../generated_package_test.go | 4 +- exporter/otelarrowexporter/go.mod | 28 +- exporter/otelarrowexporter/go.sum | 50 +- .../internal/arrow/bestofn.go | 152 +++ .../internal/arrow/common_test.go | 413 ++++++ .../internal/arrow/exporter.go | 345 ++++- .../internal/arrow/exporter_test.go | 890 ++++++++++++ .../internal/arrow/grpcmock/credentials.go | 74 + .../internal/arrow/prioritizer.go | 107 ++ .../internal/arrow/stream.go | 477 +++++++ .../internal/arrow/stream_test.go | 349 +++++ exporter/otelarrowexporter/metadata.yaml | 7 +- exporter/otelarrowexporter/otelarrow.go | 311 ++++- exporter/otelarrowexporter/otelarrow_test.go | 1189 +++++++++++++++++ .../otelarrowexporter/testdata/config.yaml | 1 + 23 files changed, 4568 insertions(+), 130 deletions(-) create mode 100644 .chloggen/otelarrowexporter.yaml create mode 100644 exporter/otelarrowexporter/internal/arrow/bestofn.go create mode 100644 exporter/otelarrowexporter/internal/arrow/common_test.go create mode 100644 exporter/otelarrowexporter/internal/arrow/exporter_test.go create mode 100644 exporter/otelarrowexporter/internal/arrow/grpcmock/credentials.go create mode 100644 exporter/otelarrowexporter/internal/arrow/prioritizer.go create mode 100644 exporter/otelarrowexporter/internal/arrow/stream.go create mode 100644 exporter/otelarrowexporter/internal/arrow/stream_test.go create mode 100644 exporter/otelarrowexporter/otelarrow_test.go diff --git a/.chloggen/otelarrowexporter.yaml b/.chloggen/otelarrowexporter.yaml new file mode 100644 index 000000000000..c1d7f0c8f147 --- /dev/null +++ b/.chloggen/otelarrowexporter.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: new_component + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: OpenTelemetry Protocol with Apache Arrow Exporter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Implementation copied from opentelemetry/otel-arrow repository @v0.20.0. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [26491] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [user] diff --git a/exporter/otelarrowexporter/README.md b/exporter/otelarrowexporter/README.md index 4fdc86c1896f..f712ca42c125 100644 --- a/exporter/otelarrowexporter/README.md +++ b/exporter/otelarrowexporter/README.md @@ -31,11 +31,8 @@ Apache Arrow. OpenTelemetry Protocol with Apache Arrow supports column-oriented data transport using the Apache Arrow data format. This component converts OTLP data into an optimized representation and then sends batches of -data using Apache Arrow to encode the stream. The OpenTelemetry -Protocol with Apache Arrow receiver component contains logic to reverse the process used in this +data using Apache Arrow to encode the stream. The [OpenTelemetry +Protocol with Apache Arrow receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/otelarrowreceiver) component contains logic to reverse the process used in this component. The use of an OpenTelemetry Protocol with Apache Arrow @@ -51,7 +48,7 @@ exporter component. This is as simple as replacing "otlp" with To enable the OpenTelemetry Protocol with Apache Arrow exporter, include it in the list of exporters for a pipeline. The `endpoint` -setting is required. The `tls` setting is requirede for insecure +setting is required. The `tls` setting is required for insecure transport. - `endpoint` (no default): host:port to which the exporter is going to send OTLP trace data, @@ -143,13 +140,9 @@ exporters: When this is configured, the stream will terminate cleanly without causing retries, with `OK` gRPC status. -The corresponding `otelarrowreceiver` keepalive setting, that is +The [corresponding `otelarrowreceiver` keepalive setting](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/otelarrowreceiver#keepalive-configuration), that is compatible with the one above, reads: - - ``` receivers: otelarrow: diff --git a/exporter/otelarrowexporter/config.go b/exporter/otelarrowexporter/config.go index 96f5cb7d7c06..68837d818b16 100644 --- a/exporter/otelarrowexporter/config.go +++ b/exporter/otelarrowexporter/config.go @@ -15,6 +15,8 @@ import ( "go.opentelemetry.io/collector/config/configretry" "go.opentelemetry.io/collector/exporter/exporterhelper" "google.golang.org/grpc" + + "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/otelarrowexporter/internal/arrow" ) // Config defines configuration for OTLP exporter. @@ -26,12 +28,12 @@ type Config struct { exporterhelper.TimeoutSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. exporterhelper.QueueSettings `mapstructure:"sending_queue"` - RetrySettings configretry.BackOffConfig `mapstructure:"retry_on_failure"` + RetryConfig configretry.BackOffConfig `mapstructure:"retry_on_failure"` configgrpc.ClientConfig `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. // Arrow includes settings specific to OTel Arrow. - Arrow ArrowSettings `mapstructure:"arrow"` + Arrow ArrowConfig `mapstructure:"arrow"` // UserDialOptions cannot be configured via `mapstructure` // schemes. This is useful for custom purposes where the @@ -40,9 +42,9 @@ type Config struct { UserDialOptions []grpc.DialOption `mapstructure:"-"` } -// ArrowSettings includes whether Arrow is enabled and the number of +// ArrowConfig includes whether Arrow is enabled and the number of // concurrent Arrow streams. 
-type ArrowSettings struct { +type ArrowConfig struct { // NumStreams determines the number of OTel Arrow streams. NumStreams int `mapstructure:"num_streams"` @@ -65,7 +67,7 @@ type ArrowSettings struct { // Note that `Zstd` applies to gRPC, not Arrow compression. PayloadCompression configcompression.Type `mapstructure:"payload_compression"` - // Disabled prevents using OTel Arrow streams. The exporter + // Disabled prevents using OTel-Arrow streams. The exporter // falls back to standard OTLP. Disabled bool `mapstructure:"disabled"` @@ -73,24 +75,18 @@ type ArrowSettings struct { // to standard OTLP. If the Arrow service is unavailable, it // will retry and/or fail. DisableDowngrade bool `mapstructure:"disable_downgrade"` + + // Prioritizer is a policy name for how load is distributed + // across streams. + Prioritizer arrow.PrioritizerName `mapstructure:"prioritizer"` } var _ component.Config = (*Config)(nil) -// Validate checks if the exporter configuration is valid -func (cfg *Config) Validate() error { - if err := cfg.QueueSettings.Validate(); err != nil { - return fmt.Errorf("queue settings has invalid configuration: %w", err) - } - if err := cfg.Arrow.Validate(); err != nil { - return fmt.Errorf("arrow settings has invalid configuration: %w", err) - } - - return nil -} +var _ component.ConfigValidator = (*ArrowConfig)(nil) // Validate returns an error when the number of streams is less than 1. -func (cfg *ArrowSettings) Validate() error { +func (cfg *ArrowConfig) Validate() error { if cfg.NumStreams < 1 { return fmt.Errorf("stream count must be > 0: %d", cfg.NumStreams) } @@ -103,6 +99,10 @@ func (cfg *ArrowSettings) Validate() error { return fmt.Errorf("zstd encoder: invalid configuration: %w", err) } + if err := cfg.Prioritizer.Validate(); err != nil { + return fmt.Errorf("invalid prioritizer: %w", err) + } + // The cfg.PayloadCompression field is validated by the underlying library, // but we only support Zstd or none. 
switch cfg.PayloadCompression { @@ -113,7 +113,7 @@ func (cfg *ArrowSettings) Validate() error { return nil } -func (cfg *ArrowSettings) toArrowProducerOptions() (arrowOpts []config.Option) { +func (cfg *ArrowConfig) toArrowProducerOptions() (arrowOpts []config.Option) { switch cfg.PayloadCompression { case configcompression.TypeZstd: arrowOpts = append(arrowOpts, config.WithZstd()) diff --git a/exporter/otelarrowexporter/config_test.go b/exporter/otelarrowexporter/config_test.go index e855fa078d57..b1a6253837df 100644 --- a/exporter/otelarrowexporter/config_test.go +++ b/exporter/otelarrowexporter/config_test.go @@ -22,6 +22,8 @@ import ( "go.opentelemetry.io/collector/config/configtls" "go.opentelemetry.io/collector/confmap/confmaptest" "go.opentelemetry.io/collector/exporter/exporterhelper" + + "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/otelarrowexporter/internal/arrow" ) func TestUnmarshalDefaultConfig(t *testing.T) { @@ -32,6 +34,7 @@ func TestUnmarshalDefaultConfig(t *testing.T) { assert.NoError(t, component.UnmarshalConfig(cm, cfg)) assert.Equal(t, factory.CreateDefaultConfig(), cfg) assert.Equal(t, "round_robin", cfg.(*Config).ClientConfig.BalancerName) + assert.Equal(t, arrow.DefaultPrioritizer, cfg.(*Config).Arrow.Prioritizer) } func TestUnmarshalConfig(t *testing.T) { @@ -45,7 +48,7 @@ func TestUnmarshalConfig(t *testing.T) { TimeoutSettings: exporterhelper.TimeoutSettings{ Timeout: 10 * time.Second, }, - RetrySettings: configretry.BackOffConfig{ + RetryConfig: configretry.BackOffConfig{ Enabled: true, InitialInterval: 10 * time.Second, RandomizationFactor: 0.7, @@ -79,20 +82,21 @@ func TestUnmarshalConfig(t *testing.T) { }, WriteBufferSize: 512 * 1024, BalancerName: "experimental", - Auth: &configauth.Authentication{AuthenticatorID: component.MustNewID("nop")}, + Auth: &configauth.Authentication{AuthenticatorID: component.NewID(component.MustNewType("nop"))}, }, - Arrow: ArrowSettings{ + Arrow: ArrowConfig{ NumStreams: 2, MaxStreamLifetime: 2 * time.Hour, PayloadCompression: configcompression.TypeZstd, Zstd: zstd.DefaultEncoderConfig(), + Prioritizer: "leastloaded8", }, }, cfg) } -func TestArrowSettingsValidate(t *testing.T) { - settings := func(enabled bool, numStreams int, maxStreamLifetime time.Duration, level zstd.Level) *ArrowSettings { - return &ArrowSettings{ +func TestArrowConfigValidate(t *testing.T) { + settings := func(enabled bool, numStreams int, maxStreamLifetime time.Duration, level zstd.Level) *ArrowConfig { + return &ArrowConfig{ Disabled: !enabled, NumStreams: numStreams, MaxStreamLifetime: maxStreamLifetime, @@ -118,16 +122,16 @@ func TestArrowSettingsValidate(t *testing.T) { require.Error(t, settings(true, math.MaxInt, 10*time.Second, zstd.MaxLevel+1).Validate()) } -func TestDefaultSettingsValid(t *testing.T) { +func TestDefaultConfigValid(t *testing.T) { cfg := createDefaultConfig() // this must be set by the user and config // validation always checks that a value is set. 
cfg.(*Config).Arrow.MaxStreamLifetime = 2 * time.Second - require.NoError(t, cfg.(*Config).Validate()) + require.NoError(t, component.ValidateConfig(cfg)) } -func TestArrowSettingsPayloadCompressionZstd(t *testing.T) { - settings := ArrowSettings{ +func TestArrowConfigPayloadCompressionZstd(t *testing.T) { + settings := ArrowConfig{ PayloadCompression: configcompression.TypeZstd, } var config config.Config @@ -137,9 +141,9 @@ func TestArrowSettingsPayloadCompressionZstd(t *testing.T) { require.True(t, config.Zstd) } -func TestArrowSettingsPayloadCompressionNone(t *testing.T) { +func TestArrowConfigPayloadCompressionNone(t *testing.T) { for _, value := range []string{"", "none"} { - settings := ArrowSettings{ + settings := ArrowConfig{ PayloadCompression: configcompression.Type(value), } var config config.Config diff --git a/exporter/otelarrowexporter/doc.go b/exporter/otelarrowexporter/doc.go index e76c5e35612b..bb6fcbefc7ad 100644 --- a/exporter/otelarrowexporter/doc.go +++ b/exporter/otelarrowexporter/doc.go @@ -3,8 +3,4 @@ //go:generate mdatagen metadata.yaml -// Package otelarrowexporter exports telemetry using OpenTelemetry -// Protocol with Apache Arrow and/or standard OpenTelemetry Protocol -// data using configuration structures similar to the core OTLP -// exporter. package otelarrowexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/otelarrowexporter" diff --git a/exporter/otelarrowexporter/factory.go b/exporter/otelarrowexporter/factory.go index edc1c5f2c3fc..9a459f14e8dc 100644 --- a/exporter/otelarrowexporter/factory.go +++ b/exporter/otelarrowexporter/factory.go @@ -25,7 +25,7 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/otelarrowexporter/internal/metadata" ) -// NewFactory creates a factory for OTel-Arrow exporter. +// NewFactory creates a factory for OTLP exporter. func NewFactory() exporter.Factory { return exporter.NewFactory( metadata.Type, @@ -39,9 +39,8 @@ func NewFactory() exporter.Factory { func createDefaultConfig() component.Config { return &Config{ TimeoutSettings: exporterhelper.NewDefaultTimeoutSettings(), - RetrySettings: configretry.NewDefaultBackOffConfig(), + RetryConfig: configretry.NewDefaultBackOffConfig(), QueueSettings: exporterhelper.NewDefaultQueueSettings(), - ClientConfig: configgrpc.ClientConfig{ Headers: map[string]configopaque.String{}, // Default to zstd compression @@ -54,11 +53,12 @@ func createDefaultConfig() component.Config { // destination. BalancerName: "round_robin", }, - Arrow: ArrowSettings{ + Arrow: ArrowConfig{ NumStreams: runtime.NumCPU(), MaxStreamLifetime: time.Hour, - Zstd: zstd.DefaultEncoderConfig(), + Zstd: zstd.DefaultEncoderConfig(), + Prioritizer: arrow.DefaultPrioritizer, // PayloadCompression is off by default because gRPC // compression is on by default, above. 
@@ -67,14 +67,14 @@ func createDefaultConfig() component.Config { } } -func (e *baseExporter) helperOptions() []exporterhelper.Option { +func (oce *baseExporter) helperOptions() []exporterhelper.Option { return []exporterhelper.Option{ exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), - exporterhelper.WithTimeout(e.config.TimeoutSettings), - exporterhelper.WithRetry(e.config.RetrySettings), - exporterhelper.WithQueue(e.config.QueueSettings), - exporterhelper.WithStart(e.start), - exporterhelper.WithShutdown(e.shutdown), + exporterhelper.WithTimeout(oce.config.TimeoutSettings), + exporterhelper.WithRetry(oce.config.RetryConfig), + exporterhelper.WithQueue(oce.config.QueueSettings), + exporterhelper.WithStart(oce.start), + exporterhelper.WithShutdown(oce.shutdown), } } @@ -97,13 +97,13 @@ func createTracesExporter( set exporter.CreateSettings, cfg component.Config, ) (exporter.Traces, error) { - exp, err := newExporter(cfg, set, createArrowTracesStream) + oce, err := newExporter(cfg, set, createArrowTracesStream) if err != nil { return nil, err } - return exporterhelper.NewTracesExporter(ctx, exp.settings, exp.config, - exp.pushTraces, - exp.helperOptions()..., + return exporterhelper.NewTracesExporter(ctx, oce.settings, oce.config, + oce.pushTraces, + oce.helperOptions()..., ) } @@ -116,13 +116,13 @@ func createMetricsExporter( set exporter.CreateSettings, cfg component.Config, ) (exporter.Metrics, error) { - exp, err := newExporter(cfg, set, createArrowMetricsStream) + oce, err := newExporter(cfg, set, createArrowMetricsStream) if err != nil { return nil, err } - return exporterhelper.NewMetricsExporter(ctx, exp.settings, exp.config, - exp.pushMetrics, - exp.helperOptions()..., + return exporterhelper.NewMetricsExporter(ctx, oce.settings, oce.config, + oce.pushMetrics, + oce.helperOptions()..., ) } @@ -135,12 +135,12 @@ func createLogsExporter( set exporter.CreateSettings, cfg component.Config, ) (exporter.Logs, error) { - exp, err := newExporter(cfg, set, createArrowLogsStream) + oce, err := newExporter(cfg, set, createArrowLogsStream) if err != nil { return nil, err } - return exporterhelper.NewLogsExporter(ctx, exp.settings, exp.config, - exp.pushLogs, - exp.helperOptions()..., + return exporterhelper.NewLogsExporter(ctx, oce.settings, oce.config, + oce.pushLogs, + oce.helperOptions()..., ) } diff --git a/exporter/otelarrowexporter/factory_test.go b/exporter/otelarrowexporter/factory_test.go index 45d20e553288..75dfcc736a4b 100644 --- a/exporter/otelarrowexporter/factory_test.go +++ b/exporter/otelarrowexporter/factory_test.go @@ -22,6 +22,8 @@ import ( "go.opentelemetry.io/collector/config/configtls" "go.opentelemetry.io/collector/exporter/exporterhelper" "go.opentelemetry.io/collector/exporter/exportertest" + + "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/otelarrowexporter/internal/arrow" ) func TestCreateDefaultConfig(t *testing.T) { @@ -31,16 +33,17 @@ func TestCreateDefaultConfig(t *testing.T) { assert.NoError(t, componenttest.CheckConfigStruct(cfg)) ocfg, ok := factory.CreateDefaultConfig().(*Config) assert.True(t, ok) - assert.Equal(t, ocfg.RetrySettings, configretry.NewDefaultBackOffConfig()) + assert.Equal(t, ocfg.RetryConfig, configretry.NewDefaultBackOffConfig()) assert.Equal(t, ocfg.QueueSettings, exporterhelper.NewDefaultQueueSettings()) assert.Equal(t, ocfg.TimeoutSettings, exporterhelper.NewDefaultTimeoutSettings()) assert.Equal(t, ocfg.Compression, configcompression.TypeZstd) - assert.Equal(t, ocfg.Arrow, ArrowSettings{ + 
assert.Equal(t, ocfg.Arrow, ArrowConfig{ Disabled: false, NumStreams: runtime.NumCPU(), MaxStreamLifetime: time.Hour, PayloadCompression: "", Zstd: zstd.DefaultEncoderConfig(), + Prioritizer: arrow.DefaultPrioritizer, }) } @@ -185,8 +188,8 @@ func TestCreateTracesExporter(t *testing.T) { t.Run(tt.name, func(t *testing.T) { factory := NewFactory() set := exportertest.NewNopCreateSettings() - config := tt.config - consumer, err := factory.CreateTracesExporter(context.Background(), set, &config) + cfg := tt.config + consumer, err := factory.CreateTracesExporter(context.Background(), set, &cfg) if tt.mustFailOnCreate { assert.NotNil(t, err) return @@ -225,7 +228,7 @@ func TestCreateArrowTracesExporter(t *testing.T) { factory := NewFactory() cfg := factory.CreateDefaultConfig().(*Config) cfg.ClientConfig.Endpoint = testutil.GetAvailableLocalAddress(t) - cfg.Arrow = ArrowSettings{ + cfg.Arrow = ArrowConfig{ NumStreams: 1, } set := exportertest.NewNopCreateSettings() diff --git a/exporter/otelarrowexporter/generated_component_test.go b/exporter/otelarrowexporter/generated_component_test.go index 0b323ce6937d..aa7ea0c9a47f 100644 --- a/exporter/otelarrowexporter/generated_component_test.go +++ b/exporter/otelarrowexporter/generated_component_test.go @@ -3,10 +3,20 @@ package otelarrowexporter import ( + "context" "testing" + "time" "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/confmap/confmaptest" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exportertest" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" ) func TestComponentFactoryType(t *testing.T) { @@ -16,3 +26,125 @@ func TestComponentFactoryType(t *testing.T) { func TestComponentConfigStruct(t *testing.T) { require.NoError(t, componenttest.CheckConfigStruct(NewFactory().CreateDefaultConfig())) } + +func TestComponentLifecycle(t *testing.T) { + factory := NewFactory() + + tests := []struct { + name string + createFn func(ctx context.Context, set exporter.CreateSettings, cfg component.Config) (component.Component, error) + }{ + + { + name: "logs", + createFn: func(ctx context.Context, set exporter.CreateSettings, cfg component.Config) (component.Component, error) { + return factory.CreateLogsExporter(ctx, set, cfg) + }, + }, + + { + name: "metrics", + createFn: func(ctx context.Context, set exporter.CreateSettings, cfg component.Config) (component.Component, error) { + return factory.CreateMetricsExporter(ctx, set, cfg) + }, + }, + + { + name: "traces", + createFn: func(ctx context.Context, set exporter.CreateSettings, cfg component.Config) (component.Component, error) { + return factory.CreateTracesExporter(ctx, set, cfg) + }, + }, + } + + cm, err := confmaptest.LoadConf("metadata.yaml") + require.NoError(t, err) + cfg := factory.CreateDefaultConfig() + sub, err := cm.Sub("tests::config") + require.NoError(t, err) + require.NoError(t, component.UnmarshalConfig(sub, cfg)) + + for _, test := range tests { + t.Run(test.name+"-shutdown", func(t *testing.T) { + c, err := test.createFn(context.Background(), exportertest.NewNopCreateSettings(), cfg) + require.NoError(t, err) + err = c.Shutdown(context.Background()) + require.NoError(t, err) + }) + t.Run(test.name+"-lifecycle", func(t *testing.T) { + c, err := test.createFn(context.Background(), 
exportertest.NewNopCreateSettings(), cfg) + require.NoError(t, err) + host := componenttest.NewNopHost() + err = c.Start(context.Background(), host) + require.NoError(t, err) + require.NotPanics(t, func() { + switch test.name { + case "logs": + e, ok := c.(exporter.Logs) + require.True(t, ok) + logs := generateLifecycleTestLogs() + if !e.Capabilities().MutatesData { + logs.MarkReadOnly() + } + err = e.ConsumeLogs(context.Background(), logs) + case "metrics": + e, ok := c.(exporter.Metrics) + require.True(t, ok) + metrics := generateLifecycleTestMetrics() + if !e.Capabilities().MutatesData { + metrics.MarkReadOnly() + } + err = e.ConsumeMetrics(context.Background(), metrics) + case "traces": + e, ok := c.(exporter.Traces) + require.True(t, ok) + traces := generateLifecycleTestTraces() + if !e.Capabilities().MutatesData { + traces.MarkReadOnly() + } + err = e.ConsumeTraces(context.Background(), traces) + } + }) + + require.NoError(t, err) + + err = c.Shutdown(context.Background()) + require.NoError(t, err) + }) + } +} + +func generateLifecycleTestLogs() plog.Logs { + logs := plog.NewLogs() + rl := logs.ResourceLogs().AppendEmpty() + rl.Resource().Attributes().PutStr("resource", "R1") + l := rl.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty() + l.Body().SetStr("test log message") + l.SetTimestamp(pcommon.NewTimestampFromTime(time.Now())) + return logs +} + +func generateLifecycleTestMetrics() pmetric.Metrics { + metrics := pmetric.NewMetrics() + rm := metrics.ResourceMetrics().AppendEmpty() + rm.Resource().Attributes().PutStr("resource", "R1") + m := rm.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() + m.SetName("test_metric") + dp := m.SetEmptyGauge().DataPoints().AppendEmpty() + dp.Attributes().PutStr("test_attr", "value_1") + dp.SetIntValue(123) + dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now())) + return metrics +} + +func generateLifecycleTestTraces() ptrace.Traces { + traces := ptrace.NewTraces() + rs := traces.ResourceSpans().AppendEmpty() + rs.Resource().Attributes().PutStr("resource", "R1") + span := rs.ScopeSpans().AppendEmpty().Spans().AppendEmpty() + span.Attributes().PutStr("test_attr", "value_1") + span.SetName("test_span") + span.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Now().Add(-1 * time.Second))) + span.SetEndTimestamp(pcommon.NewTimestampFromTime(time.Now())) + return traces +} diff --git a/exporter/otelarrowexporter/generated_package_test.go b/exporter/otelarrowexporter/generated_package_test.go index eca1471d7dfd..c19cf02cbd7f 100644 --- a/exporter/otelarrowexporter/generated_package_test.go +++ b/exporter/otelarrowexporter/generated_package_test.go @@ -4,8 +4,10 @@ package otelarrowexporter import ( "testing" + + "go.uber.org/goleak" ) func TestMain(m *testing.M) { - // skipping goleak test as per metadata.yml configuration + goleak.VerifyTestMain(m) } diff --git a/exporter/otelarrowexporter/go.mod b/exporter/otelarrowexporter/go.mod index ddac000a2253..52d7279728ec 100644 --- a/exporter/otelarrowexporter/go.mod +++ b/exporter/otelarrowexporter/go.mod @@ -3,9 +3,11 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/exporter/otelar go 1.21.0 require ( + github.com/apache/arrow/go/v14 v14.0.2 github.com/open-telemetry/otel-arrow v0.22.0 github.com/open-telemetry/otel-arrow/collector v0.22.0 github.com/stretchr/testify v1.9.0 + go.opentelemetry.io/collector v0.100.0 go.opentelemetry.io/collector/component v0.100.0 go.opentelemetry.io/collector/config/configauth v0.100.0 go.opentelemetry.io/collector/config/configcompression 
v1.7.0 @@ -16,19 +18,32 @@ require ( go.opentelemetry.io/collector/confmap v0.100.0 go.opentelemetry.io/collector/consumer v0.100.0 go.opentelemetry.io/collector/exporter v0.100.0 + go.opentelemetry.io/collector/extension v0.100.0 + go.opentelemetry.io/collector/extension/auth v0.100.0 go.opentelemetry.io/collector/pdata v1.7.0 + go.opentelemetry.io/otel v1.26.0 go.opentelemetry.io/otel/metric v1.26.0 go.opentelemetry.io/otel/trace v1.26.0 + go.uber.org/goleak v1.3.0 + go.uber.org/mock v0.4.0 + go.uber.org/multierr v1.11.0 + go.uber.org/zap v1.27.0 + golang.org/x/net v0.24.0 + google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda google.golang.org/grpc v1.63.2 + google.golang.org/protobuf v1.34.0 ) require ( - github.com/apache/arrow/go/v14 v14.0.2 // indirect + github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect + github.com/axiomhq/hyperloglog v0.0.0-20230201085229-3ddf4bad03dc // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fxamacker/cbor/v2 v2.4.0 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect @@ -49,34 +64,27 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mostynb/go-grpc-compression v1.2.2 // indirect + github.com/pierrec/lz4/v4 v4.1.18 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_golang v1.19.0 // indirect github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.53.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect + github.com/x448/float16 v0.8.4 // indirect github.com/zeebo/xxh3 v1.0.2 // indirect - go.opentelemetry.io/collector v0.100.0 // indirect go.opentelemetry.io/collector/config/confignet v0.100.0 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.100.0 // indirect go.opentelemetry.io/collector/config/internal v0.100.0 // indirect - go.opentelemetry.io/collector/extension v0.100.0 // indirect - go.opentelemetry.io/collector/extension/auth v0.100.0 // indirect go.opentelemetry.io/collector/featuregate v1.7.0 // indirect go.opentelemetry.io/collector/receiver v0.100.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.51.0 // indirect - go.opentelemetry.io/otel v1.26.0 // indirect go.opentelemetry.io/otel/exporters/prometheus v0.48.0 // indirect go.opentelemetry.io/otel/sdk v1.26.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.26.0 // indirect - go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.27.0 // indirect golang.org/x/mod v0.14.0 // indirect - golang.org/x/net v0.24.0 // indirect golang.org/x/sys v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.15.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect - google.golang.org/protobuf v1.34.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/exporter/otelarrowexporter/go.sum b/exporter/otelarrowexporter/go.sum index db5468f4d691..f2ab85fc618f 100644 --- 
a/exporter/otelarrowexporter/go.sum +++ b/exporter/otelarrowexporter/go.sum @@ -1,16 +1,32 @@ +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= +github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/apache/arrow/go/v14 v14.0.2 h1:N8OkaJEOfI3mEZt07BIkvo4sC6XDbL+48MBPWO5IONw= github.com/apache/arrow/go/v14 v14.0.2/go.mod h1:u3fgh3EdgN/YQ8cVQRguVW3R+seMybFg8QBQ5LU+eBY= +github.com/axiomhq/hyperloglog v0.0.0-20230201085229-3ddf4bad03dc h1:Keo7wQ7UODUaHcEi7ltENhbAK2VgZjfat6mLy03tQzo= +github.com/axiomhq/hyperloglog v0.0.0-20230201085229-3ddf4bad03dc/go.mod h1:k08r+Yj1PRAmuayFiRK6MYuR5Ve4IuZtTfxErMIh0+c= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/brianvoe/gofakeit/v6 v6.17.0 h1:obbQTJeHfktJtiZzq0Q1bEpsNUs+yHrYlPVWt7BtmJ4= +github.com/brianvoe/gofakeit/v6 v6.17.0/go.mod h1:Ow6qC71xtwm79anlwKRlWZW6zVq9D2XHE4QSSMP/rU8= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc h1:8WFBn63wegobsYAX0YjD+8suexZDga5CctH4CCTx2+8= +github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fxamacker/cbor/v2 v2.4.0 h1:ri0ArlOR+5XunOP8CRUowT0pSJOwhW098ZCUyskZD88= +github.com/fxamacker/cbor/v2 v2.4.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -22,10 +38,12 @@ github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= 
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/flatbuffers v23.5.26+incompatible h1:M9dgRyhJemaM4Sw8+66GHBu8ioaQmyPLg1b8VwK5WJg= github.com/google/flatbuffers v23.5.26+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -35,6 +53,7 @@ github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mO github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= @@ -49,6 +68,8 @@ github.com/knadh/koanf/v2 v2.1.1 h1:/R8eXqasSTsmDCsAyYj+81Wteg8AqrV9CP6gvsTsOmM= github.com/knadh/koanf/v2 v2.1.1/go.mod h1:4mnTRbZCK+ALuBXHZMjDfG9y714L7TykVnZkXbMU3Es= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= @@ -62,11 +83,11 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mostynb/go-grpc-compression v1.2.2 h1:XaDbnRvt2+1vgr0b/l0qh4mJAfIxE0bKXtz2Znl3GGI= github.com/mostynb/go-grpc-compression v1.2.2/go.mod h1:GOCr2KBxXcblCuczg3YdLQlcin1/NfyDA348ckuCH6w= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/open-telemetry/otel-arrow v0.22.0 h1:G1jgtqAM2ho5pyKQ4tyrDzk9Y0VcJ+GZQRJgN26vRlI= github.com/open-telemetry/otel-arrow v0.22.0/go.mod h1:F50XFaiNfkfB0MYftZIUKFULm6pxfGqjbgQzevi+65M= github.com/open-telemetry/otel-arrow/collector v0.22.0 h1:lHFjzkh5PbsiW8B63SRntnP9W7bLCXV9lslO4zI0s/Y= github.com/open-telemetry/otel-arrow/collector v0.22.0/go.mod h1:R7hRwuGDxoGLB27dkJUFKDK7mGG7Yb02ODnLHx8Whis= -github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= github.com/pierrec/lz4/v4 v4.1.18/go.mod 
h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -83,8 +104,11 @@ github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDN github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= @@ -147,15 +171,28 @@ go.opentelemetry.io/otel/trace v1.26.0 h1:1ieeAUb4y0TE26jUFrCIXKpTuVK7uJGN9/Z/2L go.opentelemetry.io/otel/trace v1.26.0/go.mod h1:4iDxvGDQuUkHve82hJJ8UqrwswHYsZuWCBllGV2U2y0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= +go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/mobile 
v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= @@ -172,6 +209,7 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -181,7 +219,10 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -193,8 +234,12 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ= gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4= google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v1.63.2 
h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= @@ -202,7 +247,10 @@ google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDom google.golang.org/protobuf v1.34.0 h1:Qo/qEd2RZPCf2nKuorzksSknv0d3ERwp1vFG38gSmH4= google.golang.org/protobuf v1.34.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/exporter/otelarrowexporter/internal/arrow/bestofn.go b/exporter/otelarrowexporter/internal/arrow/bestofn.go new file mode 100644 index 000000000000..ae4bce633643 --- /dev/null +++ b/exporter/otelarrowexporter/internal/arrow/bestofn.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package arrow // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/otelarrowexporter/internal/arrow" + +import ( + "context" + "math/rand" + "runtime" + "sort" +) + +// bestOfNPrioritizer is a prioritizer that selects a less-loaded stream to write. +// https://smallrye.io/smallrye-stork/1.1.1/load-balancer/power-of-two-choices/ +type bestOfNPrioritizer struct { + doneCancel + + // input from the pipeline, as processed data with headers and + // a return channel for the result. This channel is never + // closed and is buffered. At shutdown, items of telemetry can + // be left in this channel, but users are expected to complete + // their requests before calling shutdown (and the collector's + // graph package ensures this). + input chan writeItem + + // state tracks the work being handled by all streams. + state []*streamWorkState + + // numChoices is the number of streams to consder in each decision. + numChoices int + + // loadFunc is the load function. + loadFunc loadFunc +} + +type loadFunc func(*streamWorkState) float64 + +type streamSorter struct { + work *streamWorkState + load float64 +} + +var _ streamPrioritizer = &bestOfNPrioritizer{} + +func newBestOfNPrioritizer(dc doneCancel, numChoices, numStreams int, lf loadFunc) (*bestOfNPrioritizer, []*streamWorkState) { + var state []*streamWorkState + + // Limit numChoices to the number of streams. + numChoices = min(numStreams, numChoices) + + for i := 0; i < numStreams; i++ { + ws := &streamWorkState{ + waiters: map[int64]chan<- error{}, + toWrite: make(chan writeItem, 1), + } + + state = append(state, ws) + } + + lp := &bestOfNPrioritizer{ + doneCancel: dc, + input: make(chan writeItem, runtime.NumCPU()), + state: state, + numChoices: numChoices, + loadFunc: lf, + } + + for i := 0; i < numStreams; i++ { + // TODO It's not clear if/when the the prioritizer can + // become a bottleneck. 
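		// Descriptive note on the policy implemented by run(), sendOne() and
		// streamFor() below: each run() goroutine takes one item from lp.input,
		// compares the loads of a small sample of streams, and writes the item
		// to the least-loaded stream of that sample. For example, if the
		// sampled pair reports loads 0.9 and 0.5, the stream with load 0.5
		// receives the item.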
+ go lp.run() + } + + return lp, state +} + +func (lp *bestOfNPrioritizer) downgrade(ctx context.Context) { + for _, ws := range lp.state { + go drain(ws.toWrite, ctx.Done()) + } +} + +func (lp *bestOfNPrioritizer) sendOne(item writeItem, rnd *rand.Rand, tmp []streamSorter) { + stream := lp.streamFor(item, rnd, tmp) + writeCh := stream.toWrite + select { + case writeCh <- item: + return + + case <-lp.done: + // All other cases: signal restart. + } + item.errCh <- ErrStreamRestarting +} + +func (lp *bestOfNPrioritizer) run() { + tmp := make([]streamSorter, len(lp.state)) + rnd := rand.New(rand.NewSource(rand.Int63())) + for { + select { + case <-lp.done: + return + case item := <-lp.input: + lp.sendOne(item, rnd, tmp) + } + } +} + +// sendAndWait implements streamWriter +func (lp *bestOfNPrioritizer) sendAndWait(ctx context.Context, errCh <-chan error, wri writeItem) error { + select { + case <-lp.done: + return ErrStreamRestarting + case <-ctx.Done(): + return context.Canceled + case lp.input <- wri: + return waitForWrite(ctx, errCh, lp.done) + } +} + +func (lp *bestOfNPrioritizer) nextWriter() streamWriter { + select { + case <-lp.done: + // In case of downgrade, return nil to return into a + // non-Arrow code path. + return nil + default: + // Fall through to sendAndWait(). + return lp + } +} + +func (lp *bestOfNPrioritizer) streamFor(_ writeItem, rnd *rand.Rand, tmp []streamSorter) *streamWorkState { + // Place all streams into the temporary slice. + for idx, item := range lp.state { + tmp[idx].work = item + } + // Select numChoices at random by shifting the selection into the start + // of the temporary slice. + for i := 0; i < lp.numChoices; i++ { + pick := rnd.Intn(lp.numChoices - i) + tmp[i], tmp[i+pick] = tmp[i+pick], tmp[i] + } + for i := 0; i < lp.numChoices; i++ { + // TODO: skip channels w/ a pending item (maybe) + tmp[i].load = lp.loadFunc(tmp[i].work) + } + sort.Slice(tmp[0:lp.numChoices], func(i, j int) bool { + return tmp[i].load < tmp[j].load + }) + return tmp[0].work +} diff --git a/exporter/otelarrowexporter/internal/arrow/common_test.go b/exporter/otelarrowexporter/internal/arrow/common_test.go new file mode 100644 index 000000000000..f0f6f3823c09 --- /dev/null +++ b/exporter/otelarrowexporter/internal/arrow/common_test.go @@ -0,0 +1,413 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package arrow + +import ( + "context" + "fmt" + "io" + + arrowpb "github.com/open-telemetry/otel-arrow/api/experimental/arrow/v1" + arrowCollectorMock "github.com/open-telemetry/otel-arrow/api/experimental/arrow/v1/mock" + "github.com/open-telemetry/otel-arrow/collector/testdata" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.uber.org/mock/gomock" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "go.uber.org/zap/zaptest" + "go.uber.org/zap/zaptest/observer" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/status" + + "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/otelarrowexporter/internal/arrow/grpcmock" +) + +var ( + twoTraces = testdata.GenerateTraces(2) + twoMetrics = testdata.GenerateMetrics(2) + twoLogs = testdata.GenerateLogs(2) +) + +type testChannel interface { + onRecv(context.Context) func() (*arrowpb.BatchStatus, error) + onSend(context.Context) func(*arrowpb.BatchArrowRecords) error + onConnect(context.Context) error + onCloseSend() func() error +} + +type commonTestCase struct { + 
ctrl *gomock.Controller + telset component.TelemetrySettings + observedLogs *observer.ObservedLogs + traceClient StreamClientFunc + traceCall *gomock.Call + perRPCCredentials credentials.PerRPCCredentials + requestMetadataCall *gomock.Call +} + +type noisyTest bool + +const Noisy noisyTest = true +const NotNoisy noisyTest = false + +func newTestTelemetry(t zaptest.TestingT, noisy noisyTest) (component.TelemetrySettings, *observer.ObservedLogs) { + telset := componenttest.NewNopTelemetrySettings() + if noisy { + return telset, nil + } + core, obslogs := observer.New(zapcore.InfoLevel) + telset.Logger = zap.New(zapcore.NewTee(core, zaptest.NewLogger(t).Core())) + return telset, obslogs +} + +type z2m struct { + zaptest.TestingT +} + +var _ gomock.TestReporter = z2m{} + +func (t z2m) Fatalf(format string, args ...any) { + t.Errorf(format, args...) + t.Fail() +} + +func newCommonTestCase(t zaptest.TestingT, noisy noisyTest) *commonTestCase { + ctrl := gomock.NewController(z2m{t}) + telset, obslogs := newTestTelemetry(t, noisy) + + creds := grpcmock.NewMockPerRPCCredentials(ctrl) + creds.EXPECT().RequireTransportSecurity().Times(0) // unused interface method + requestMetadataCall := creds.EXPECT().GetRequestMetadata( + gomock.Any(), // context.Context + gomock.Any(), // ...string (unused `uri` parameter) + ).Times(0) + + traceClient := arrowCollectorMock.NewMockArrowTracesServiceClient(ctrl) + + traceCall := traceClient.EXPECT().ArrowTraces( + gomock.Any(), // context.Context + gomock.Any(), // ...grpc.CallOption + ).Times(0) + return &commonTestCase{ + ctrl: ctrl, + telset: telset, + observedLogs: obslogs, + traceClient: MakeAnyStreamClient("ArrowTraces", traceClient.ArrowTraces), + traceCall: traceCall, + perRPCCredentials: creds, + requestMetadataCall: requestMetadataCall, + } +} + +type commonTestStream struct { + anyStreamClient AnyStreamClient + ctxCall *gomock.Call + sendCall *gomock.Call + recvCall *gomock.Call + closeSendCall *gomock.Call +} + +func (ctc *commonTestCase) newMockStream(ctx context.Context) *commonTestStream { + client := arrowCollectorMock.NewMockArrowTracesService_ArrowTracesClient(ctc.ctrl) + + testStream := &commonTestStream{ + anyStreamClient: client, + ctxCall: client.EXPECT().Context().AnyTimes().Return(ctx), + sendCall: client.EXPECT().Send( + gomock.Any(), // *arrowpb.BatchArrowRecords + ).Times(0), + recvCall: client.EXPECT().Recv().Times(0), + closeSendCall: client.EXPECT().CloseSend().Times(0), + } + return testStream +} + +// returnNewStream applies the list of test channels in order to +// construct new streams. The final entry is re-used for new streams +// when it is reached. +func (ctc *commonTestCase) returnNewStream(hs ...testChannel) func(context.Context, ...grpc.CallOption) ( + arrowpb.ArrowTracesService_ArrowTracesClient, + error, +) { + var pos int + return func(ctx context.Context, _ ...grpc.CallOption) ( + arrowpb.ArrowTracesService_ArrowTracesClient, + error, + ) { + h := hs[pos] + if pos < len(hs) { + pos++ + } + if err := h.onConnect(ctx); err != nil { + return nil, err + } + str := ctc.newMockStream(ctx) + str.sendCall.AnyTimes().DoAndReturn(h.onSend(ctx)) + str.recvCall.AnyTimes().DoAndReturn(h.onRecv(ctx)) + str.closeSendCall.AnyTimes().DoAndReturn(h.onCloseSend()) + return str.anyStreamClient, nil + } +} + +// repeatedNewStream returns a stream configured with a new test +// channel on every ArrowStream() request. 
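// A typical use (simplified from the stream-race test in exporter_test.go) is:
//
//	tc.traceCall.AnyTimes().DoAndReturn(tc.repeatedNewStream(func() testChannel {
//		return newUnresponsiveTestChannel()
//	}))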
+func (ctc *commonTestCase) repeatedNewStream(nc func() testChannel) func(context.Context, ...grpc.CallOption) ( + arrowpb.ArrowTracesService_ArrowTracesClient, + error, +) { + return func(ctx context.Context, _ ...grpc.CallOption) ( + arrowpb.ArrowTracesService_ArrowTracesClient, + error, + ) { + h := nc() + if err := h.onConnect(ctx); err != nil { + return nil, err + } + str := ctc.newMockStream(ctx) + str.sendCall.AnyTimes().DoAndReturn(h.onSend(ctx)) + str.recvCall.AnyTimes().DoAndReturn(h.onRecv(ctx)) + str.closeSendCall.AnyTimes().DoAndReturn(h.onCloseSend()) + return str.anyStreamClient, nil + } +} + +// healthyTestChannel accepts the connection and returns an OK status immediately. +type healthyTestChannel struct { + sent chan *arrowpb.BatchArrowRecords + recv chan *arrowpb.BatchStatus +} + +func newHealthyTestChannel() *healthyTestChannel { + return &healthyTestChannel{ + sent: make(chan *arrowpb.BatchArrowRecords), + recv: make(chan *arrowpb.BatchStatus), + } +} + +func (tc *healthyTestChannel) sendChannel() chan *arrowpb.BatchArrowRecords { + return tc.sent +} + +func (tc *healthyTestChannel) onConnect(_ context.Context) error { + return nil +} + +func (tc *healthyTestChannel) onCloseSend() func() error { + return func() error { + close(tc.sent) + return nil + } +} + +func (tc *healthyTestChannel) onSend(ctx context.Context) func(*arrowpb.BatchArrowRecords) error { + return func(req *arrowpb.BatchArrowRecords) error { + select { + case tc.sendChannel() <- req: + return nil + case <-ctx.Done(): + return ctx.Err() + } + } +} + +func (tc *healthyTestChannel) onRecv(ctx context.Context) func() (*arrowpb.BatchStatus, error) { + return func() (*arrowpb.BatchStatus, error) { + select { + case recv, ok := <-tc.recv: + if !ok { + return nil, io.EOF + } + + return recv, nil + case <-ctx.Done(): + return &arrowpb.BatchStatus{}, ctx.Err() + } + } +} + +// unresponsiveTestChannel accepts the connection and receives data, +// but never responds with status OK. +type unresponsiveTestChannel struct { + ch chan struct{} +} + +func newUnresponsiveTestChannel() *unresponsiveTestChannel { + return &unresponsiveTestChannel{ + ch: make(chan struct{}), + } +} + +func (tc *unresponsiveTestChannel) onConnect(_ context.Context) error { + return nil +} + +func (tc *unresponsiveTestChannel) onCloseSend() func() error { + return func() error { + return nil + } +} + +func (tc *unresponsiveTestChannel) onSend(ctx context.Context) func(*arrowpb.BatchArrowRecords) error { + return func(_ *arrowpb.BatchArrowRecords) error { + select { + case <-ctx.Done(): + return ctx.Err() + default: + return nil + } + } +} + +func (tc *unresponsiveTestChannel) onRecv(ctx context.Context) func() (*arrowpb.BatchStatus, error) { + return func() (*arrowpb.BatchStatus, error) { + select { + case <-tc.ch: + return nil, io.EOF + case <-ctx.Done(): + return &arrowpb.BatchStatus{}, ctx.Err() + } + } +} + +func (tc *unresponsiveTestChannel) unblock() { + close(tc.ch) +} + +// unsupportedTestChannel mimics gRPC's behavior when there is no +// arrow stream service registered with the server. +type arrowUnsupportedTestChannel struct { +} + +func newArrowUnsupportedTestChannel() *arrowUnsupportedTestChannel { + return &arrowUnsupportedTestChannel{} +} + +func (tc *arrowUnsupportedTestChannel) onConnect(_ context.Context) error { + // Note: this matches gRPC's apparent behavior. the stream + // connection succeeds and the unsupported code is returned to + // the Recv() call. 
+ return nil +} + +func (tc *arrowUnsupportedTestChannel) onCloseSend() func() error { + return func() error { + return nil + } +} + +func (tc *arrowUnsupportedTestChannel) onSend(ctx context.Context) func(*arrowpb.BatchArrowRecords) error { + return func(_ *arrowpb.BatchArrowRecords) error { + <-ctx.Done() + return ctx.Err() + } +} + +func (tc *arrowUnsupportedTestChannel) onRecv(_ context.Context) func() (*arrowpb.BatchStatus, error) { + return func() (*arrowpb.BatchStatus, error) { + err := status.Error(codes.Unimplemented, "arrow will not be served") + return &arrowpb.BatchStatus{}, err + } +} + +// disconnectedTestChannel allows the connection to time out. +type disconnectedTestChannel struct { +} + +func newDisconnectedTestChannel() *disconnectedTestChannel { + return &disconnectedTestChannel{} +} + +func (tc *disconnectedTestChannel) onConnect(ctx context.Context) error { + <-ctx.Done() + return ctx.Err() +} + +func (tc *disconnectedTestChannel) onCloseSend() func() error { + return func() error { + panic("unreachable") + } +} + +func (tc *disconnectedTestChannel) onSend(_ context.Context) func(*arrowpb.BatchArrowRecords) error { + return func(_ *arrowpb.BatchArrowRecords) error { + panic("unreachable") + } +} + +func (tc *disconnectedTestChannel) onRecv(_ context.Context) func() (*arrowpb.BatchStatus, error) { + return func() (*arrowpb.BatchStatus, error) { + panic("unreachable") + } +} + +// sendErrorTestChannel returns an error in Send() +type sendErrorTestChannel struct { + release chan struct{} +} + +func newSendErrorTestChannel() *sendErrorTestChannel { + return &sendErrorTestChannel{ + release: make(chan struct{}), + } +} + +func (tc *sendErrorTestChannel) onConnect(_ context.Context) error { + return nil +} + +func (tc *sendErrorTestChannel) onCloseSend() func() error { + return func() error { + return nil + } +} + +func (tc *sendErrorTestChannel) onSend(_ context.Context) func(*arrowpb.BatchArrowRecords) error { + return func(*arrowpb.BatchArrowRecords) error { + return io.EOF + } +} + +func (tc *sendErrorTestChannel) unblock() { + close(tc.release) +} + +func (tc *sendErrorTestChannel) onRecv(_ context.Context) func() (*arrowpb.BatchStatus, error) { + return func() (*arrowpb.BatchStatus, error) { + <-tc.release + return &arrowpb.BatchStatus{}, io.EOF + } +} + +// connectErrorTestChannel returns an error from the ArrowTraces() call +type connectErrorTestChannel struct { +} + +func newConnectErrorTestChannel() *connectErrorTestChannel { + return &connectErrorTestChannel{} +} + +func (tc *connectErrorTestChannel) onConnect(_ context.Context) error { + return fmt.Errorf("test connect error") +} + +func (tc *connectErrorTestChannel) onCloseSend() func() error { + return func() error { + panic("unreachable") + } +} + +func (tc *connectErrorTestChannel) onSend(_ context.Context) func(*arrowpb.BatchArrowRecords) error { + return func(*arrowpb.BatchArrowRecords) error { + panic("not reached") + } +} + +func (tc *connectErrorTestChannel) onRecv(_ context.Context) func() (*arrowpb.BatchStatus, error) { + return func() (*arrowpb.BatchStatus, error) { + panic("not reached") + } +} diff --git a/exporter/otelarrowexporter/internal/arrow/exporter.go b/exporter/otelarrowexporter/internal/arrow/exporter.go index e4b2d766511b..18b3259d3b4a 100644 --- a/exporter/otelarrowexporter/internal/arrow/exporter.go +++ b/exporter/otelarrowexporter/internal/arrow/exporter.go @@ -5,40 +5,359 @@ package arrow // import "github.com/open-telemetry/opentelemetry-collector-contr import ( "context" + "errors" 
+ "math/rand" + "strconv" + "sync" + "time" arrowpb "github.com/open-telemetry/otel-arrow/api/experimental/arrow/v1" + "github.com/open-telemetry/otel-arrow/collector/netstats" + arrowRecord "github.com/open-telemetry/otel-arrow/pkg/otel/arrow_record" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" + "go.uber.org/zap" "google.golang.org/grpc" + "google.golang.org/grpc/credentials" ) -// Exporter exports OpenTelemetry Protocol with Apache Arrow protocol -// data for a specific signal. One of these structs is created per -// baseExporter, in the top-level module, when Arrow is enabled. +// Exporter is 1:1 with exporter, isolates arrow-specific +// functionality. type Exporter struct { - // TODO: Implementation + // numStreams is the number of streams that will be used. + numStreams int + + // prioritizerName the name of a balancer policy. + prioritizerName PrioritizerName + + // maxStreamLifetime is a limit on duration for streams. A + // slight "jitter" is applied relative to this value on a + // per-stream basis. + maxStreamLifetime time.Duration + + // disableDowngrade prevents downgrade from occurring, supports + // forcing Arrow transport. + disableDowngrade bool + + // telemetry includes logger, tracer, meter. + telemetry component.TelemetrySettings + + // grpcOptions includes options used by the unary RPC methods, + // e.g., WaitForReady. + grpcOptions []grpc.CallOption + + // newProducer returns a real (or mock) Producer. + newProducer func() arrowRecord.ProducerAPI + + // client is a stream corresponding with the signal's payload + // type. uses the exporter's gRPC ClientConn (or is a mock, in tests). + streamClient StreamClientFunc + + // perRPCCredentials derived from the exporter's gRPC auth settings. + perRPCCredentials credentials.PerRPCCredentials + + // returning is used to pass broken, gracefully-terminated, + // and otherwise to the stream controller. + returning chan *Stream + + // ready prioritizes streams that are ready to send + ready streamPrioritizer + + // doneCancel refers to and cancels the background context of + // this exporter. + doneCancel + + // wg counts one per active goroutine belonging to all streams + // of this exporter. The wait group has Add(1) called before + // starting goroutines so that they can be properly waited for + // in shutdown(), so the pattern is: + // + // wg.Add(1) + // go func() { + // defer wg.Done() + // ... + // }() + wg sync.WaitGroup + + // netReporter measures network traffic. + netReporter netstats.Interface } -// AnyStreamClient is the interface supported by all Arrow streams, -// i.e., any of the Arrow-supported signals having a single method w/ -// the appropriate per-signal name. +// doneCancel is used to store the done signal and cancelation +// function for a context returned by context.WithCancel. +type doneCancel struct { + done <-chan struct{} + cancel context.CancelFunc +} + +// AnyStreamClient is the interface supported by all Arrow streams. type AnyStreamClient interface { Send(*arrowpb.BatchArrowRecords) error Recv() (*arrowpb.BatchStatus, error) grpc.ClientStream } -// StreamClientFunc is a constructor for AnyStreamClients. These return +// streamClientFunc is a constructor for AnyStreamClients. These return // the method name to assist with instrumentation, since the gRPC stats // handler isn't able to see the correct uncompressed size. 
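// For the traces signal, for example, such a constructor is typically built
// from the generated gRPC client with MakeAnyStreamClient (below), along the
// lines of
//
//	MakeAnyStreamClient("ArrowTraces", arrowpb.NewArrowTracesServiceClient(conn).ArrowTraces)
//
// where conn is assumed to be an already-established *grpc.ClientConn; the
// actual wiring is done by the caller, outside this file.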
type StreamClientFunc func(context.Context, ...grpc.CallOption) (AnyStreamClient, string, error) -// MakeAnyStreamClient accepts any Arrow-like stream, which is one of -// the Arrow-supported signals having a single method w/ the -// appropriate name, and turns it into an AnyStreamClient. The method -// name is carried through because once constructed, gRPC clients will -// not reveal their service and method names. +// MakeAnyStreamClient accepts any Arrow-like stream and turns it into +// an AnyStreamClient. The method name is carried through because +// once constructed, gRPC clients will not reveal their service and +// method names. func MakeAnyStreamClient[T AnyStreamClient](method string, clientFunc func(ctx context.Context, opts ...grpc.CallOption) (T, error)) StreamClientFunc { return func(ctx context.Context, opts ...grpc.CallOption) (AnyStreamClient, string, error) { client, err := clientFunc(ctx, opts...) return client, method, err } } + +// NewExporter configures a new Exporter. +func NewExporter( + maxStreamLifetime time.Duration, + numStreams int, + prioritizerName PrioritizerName, + disableDowngrade bool, + telemetry component.TelemetrySettings, + grpcOptions []grpc.CallOption, + newProducer func() arrowRecord.ProducerAPI, + streamClient StreamClientFunc, + perRPCCredentials credentials.PerRPCCredentials, + netReporter netstats.Interface, +) *Exporter { + return &Exporter{ + maxStreamLifetime: maxStreamLifetime, + numStreams: numStreams, + prioritizerName: prioritizerName, + disableDowngrade: disableDowngrade, + telemetry: telemetry, + grpcOptions: grpcOptions, + newProducer: newProducer, + streamClient: streamClient, + perRPCCredentials: perRPCCredentials, + returning: make(chan *Stream, numStreams), + netReporter: netReporter, + } +} + +// Start creates the background context used by all streams and starts +// a stream controller, which initializes the initial set of streams. +func (e *Exporter) Start(ctx context.Context) error { + // this is the background context + ctx, e.doneCancel = newDoneCancel(ctx) + + // Starting N+1 goroutines + e.wg.Add(1) + + // this is the downgradeable context + downCtx, downDc := newDoneCancel(ctx) + + var sws []*streamWorkState + e.ready, sws = newStreamPrioritizer(downDc, e.prioritizerName, e.numStreams) + + for _, ws := range sws { + e.startArrowStream(downCtx, ws) + } + + go e.runStreamController(ctx, downCtx, downDc) + + return nil +} + +func (e *Exporter) startArrowStream(ctx context.Context, ws *streamWorkState) { + // this is the new stream context + ctx, dc := newDoneCancel(ctx) + + e.wg.Add(1) + + go e.runArrowStream(ctx, dc, ws) +} + +// runStreamController starts the initial set of streams, then waits for streams to +// terminate one at a time and restarts them. If streams come back with a nil +// client (meaning that OTel-Arrow was not supported by the endpoint), it will +// not be restarted. +func (e *Exporter) runStreamController(exportCtx, downCtx context.Context, downDc doneCancel) { + defer e.cancel() + defer e.wg.Done() + + running := e.numStreams + + for { + select { + case stream := <-e.returning: + if stream.client != nil || e.disableDowngrade { + // The stream closed or broken. Restart it. + e.startArrowStream(downCtx, stream.workState) + continue + } + // Otherwise, the stream never got started. It was + // downgraded and senders will use the standard OTLP path. + running-- + + // None of the streams were able to connect to + // an Arrow endpoint. 
+ if running == 0 { + e.telemetry.Logger.Info("could not establish arrow streams, downgrading to standard OTLP export") + downDc.cancel() + // this call is allowed to block indefinitely, + // as to call drain(). + e.ready.downgrade(exportCtx) + return + } + + case <-exportCtx.Done(): + // We are shutting down. + return + } + } +} + +// addJitter is used to subtract 0-5% from max_stream_lifetime. Since +// the max_stream_lifetime value is expected to be close to the +// receiver's max_connection_age_grace setting, we do not add jitter, +// only subtract. +func addJitter(v time.Duration) time.Duration { + if v == 0 { + return 0 + } + return v - time.Duration(rand.Int63n(int64(v/20))) +} + +// runArrowStream begins one gRPC stream using a child of the background context. +// If the stream connection is successful, this goroutine starts another goroutine +// to call writeStream() and performs readStream() itself. When the stream shuts +// down this call synchronously waits for and unblocks the consumers. +func (e *Exporter) runArrowStream(ctx context.Context, dc doneCancel, state *streamWorkState) { + defer dc.cancel() + producer := e.newProducer() + + stream := newStream(producer, e.ready, e.telemetry, e.netReporter, state) + stream.maxStreamLifetime = addJitter(e.maxStreamLifetime) + + defer func() { + if err := producer.Close(); err != nil { + e.telemetry.Logger.Error("arrow producer close:", zap.Error(err)) + } + e.wg.Done() + e.returning <- stream + }() + + stream.run(ctx, dc, e.streamClient, e.grpcOptions) +} + +// SendAndWait tries to send using an Arrow stream. The results are: +// +// (true, nil): Arrow send: success at consumer +// (false, nil): Arrow is not supported by the server, caller expected to fallback. +// (true, non-nil): Arrow send: server response may be permanent or allow retry. +// (false, non-nil): Context timeout prevents retry. +// +// consumer should fall back to standard OTLP, (true, nil) +func (e *Exporter) SendAndWait(ctx context.Context, data any) (bool, error) { + errCh := make(chan error, 1) + + // Note that if the OTLP exporter's gRPC Headers field was + // set, those (static) headers were used to establish the + // stream. The caller's context was returned by + // baseExporter.enhanceContext() includes the static headers + // plus optional client metadata. Here, get whatever + // headers that gRPC would have transmitted for a unary RPC + // and convey them via the Arrow batch. + + // Note that the "uri" parameter to GetRequestMetadata is + // not used by the headersetter extension and is not well + // documented. Since it's an optional list, we omit it. + var md map[string]string + if e.perRPCCredentials != nil { + var err error + md, err = e.perRPCCredentials.GetRequestMetadata(ctx) + if err != nil { + return false, err + } + } + + // Note that the uncompressed size as measured by the receiver + // will be different than uncompressed size as measured by the + // exporter, because of the optimization phase performed in the + // conversion to Arrow. 
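	// As a concrete example, the two-span traces produced by
	// testdata.GenerateTraces(2) in the unit tests marshal to 329 bytes of
	// OTLP protobuf, which is the otlp-pdata-size value asserted in
	// TestArrowExporterHeaders.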
+ var uncompSize int + switch data := data.(type) { + case ptrace.Traces: + var sizer ptrace.ProtoMarshaler + uncompSize = sizer.TracesSize(data) + case plog.Logs: + var sizer plog.ProtoMarshaler + uncompSize = sizer.LogsSize(data) + case pmetric.Metrics: + var sizer pmetric.ProtoMarshaler + uncompSize = sizer.MetricsSize(data) + } + + if md == nil { + md = make(map[string]string) + } + md["otlp-pdata-size"] = strconv.Itoa(uncompSize) + + wri := writeItem{ + records: data, + md: md, + uncompSize: uncompSize, + errCh: errCh, + producerCtx: ctx, + } + + for { + writer := e.ready.nextWriter() + + if writer == nil { + return false, nil // a downgraded connection + } + + err := writer.sendAndWait(ctx, errCh, wri) + if err != nil && errors.Is(err, ErrStreamRestarting) { + continue // an internal retry + + } + // result from arrow server (may be nil, may be + // permanent, etc.) + return true, err + } +} + +// Shutdown returns when all Arrow-associated goroutines have returned. +func (e *Exporter) Shutdown(_ context.Context) error { + e.cancel() + e.wg.Wait() + return nil +} + +// waitForWrite waits for the first of the following: +// 1. This context timeout +// 2. Completion with err == nil or err != nil +// 3. Downgrade +func waitForWrite(ctx context.Context, errCh <-chan error, down <-chan struct{}) error { + select { + case <-ctx.Done(): + // This caller's context timed out. + return ctx.Err() + case <-down: + return ErrStreamRestarting + case err := <-errCh: + // Note: includes err == nil and err != nil cases. + return err + } +} + +// newDoneCancel returns a doneCancel, which is a new context with +// type that carries its done and cancel function. +func newDoneCancel(ctx context.Context) (context.Context, doneCancel) { + ctx, cancel := context.WithCancel(ctx) + return ctx, doneCancel{ + done: ctx.Done(), + cancel: cancel, + } +} diff --git a/exporter/otelarrowexporter/internal/arrow/exporter_test.go b/exporter/otelarrowexporter/internal/arrow/exporter_test.go new file mode 100644 index 000000000000..276e5f3fa437 --- /dev/null +++ b/exporter/otelarrowexporter/internal/arrow/exporter_test.go @@ -0,0 +1,890 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package arrow + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "sync" + "sync/atomic" + "testing" + "time" + + arrowpb "github.com/open-telemetry/otel-arrow/api/experimental/arrow/v1" + "github.com/open-telemetry/otel-arrow/collector/netstats" + "github.com/open-telemetry/otel-arrow/collector/testdata" + arrowRecord "github.com/open-telemetry/otel-arrow/pkg/otel/arrow_record" + arrowRecordMock "github.com/open-telemetry/otel-arrow/pkg/otel/arrow_record/mock" + otelAssert "github.com/open-telemetry/otel-arrow/pkg/otel/assert" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" + "go.uber.org/mock/gomock" + "go.uber.org/zap/zaptest" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +var AllPrioritizers = []PrioritizerName{LeastLoadedPrioritizer, LeastLoadedTwoPrioritizer} + +const defaultMaxStreamLifetime = 11 * time.Second + +type compareJSONTraces struct{ ptrace.Traces } +type compareJSONMetrics struct{ pmetric.Metrics } +type compareJSONLogs struct{ plog.Logs } + +func (c 
compareJSONTraces) MarshalJSON() ([]byte, error) { + var m ptrace.JSONMarshaler + return m.MarshalTraces(c.Traces) +} + +func (c compareJSONMetrics) MarshalJSON() ([]byte, error) { + var m pmetric.JSONMarshaler + return m.MarshalMetrics(c.Metrics) +} + +func (c compareJSONLogs) MarshalJSON() ([]byte, error) { + var m plog.JSONMarshaler + return m.MarshalLogs(c.Logs) +} + +type exporterTestCase struct { + *commonTestCase + exporter *Exporter +} + +func newSingleStreamTestCase(t *testing.T, pname PrioritizerName) *exporterTestCase { + return newExporterTestCaseCommon(t, pname, NotNoisy, defaultMaxStreamLifetime, 1, false, nil) +} + +func newShortLifetimeStreamTestCase(t *testing.T, pname PrioritizerName, numStreams int) *exporterTestCase { + return newExporterTestCaseCommon(t, pname, NotNoisy, time.Second/2, numStreams, false, nil) +} + +func newSingleStreamDowngradeDisabledTestCase(t *testing.T, pname PrioritizerName) *exporterTestCase { + return newExporterTestCaseCommon(t, pname, NotNoisy, defaultMaxStreamLifetime, 1, true, nil) +} + +func newSingleStreamMetadataTestCase(t *testing.T) *exporterTestCase { + var count int + return newExporterTestCaseCommon(t, DefaultPrioritizer, NotNoisy, defaultMaxStreamLifetime, 1, false, func(_ context.Context) (map[string]string, error) { + defer func() { count++ }() + if count%2 == 0 { + return nil, nil + } + return map[string]string{ + "expected1": "metadata1", + "expected2": fmt.Sprint(count), + }, nil + }) +} + +func newExporterNoisyTestCase(t *testing.T, numStreams int) *exporterTestCase { + return newExporterTestCaseCommon(t, DefaultPrioritizer, Noisy, defaultMaxStreamLifetime, numStreams, false, nil) +} + +func copyBatch[T any](recordFunc func(T) (*arrowpb.BatchArrowRecords, error)) func(T) (*arrowpb.BatchArrowRecords, error) { + // Because Arrow-IPC uses zero copy, we have to copy inside the test + // instead of sharing pointers to BatchArrowRecords. + return func(data T) (*arrowpb.BatchArrowRecords, error) { + in, err := recordFunc(data) + if err != nil { + return nil, err + } + + hcpy := make([]byte, len(in.Headers)) + copy(hcpy, in.Headers) + + pays := make([]*arrowpb.ArrowPayload, len(in.ArrowPayloads)) + + for i, inp := range in.ArrowPayloads { + rcpy := make([]byte, len(inp.Record)) + copy(rcpy, inp.Record) + pays[i] = &arrowpb.ArrowPayload{ + SchemaId: inp.SchemaId, + Type: inp.Type, + Record: rcpy, + } + } + + return &arrowpb.BatchArrowRecords{ + BatchId: in.BatchId, + Headers: hcpy, + ArrowPayloads: pays, + }, nil + } +} + +func mockArrowProducer(ctc *commonTestCase) func() arrowRecord.ProducerAPI { + return func() arrowRecord.ProducerAPI { + // Mock the close function, use a real producer for testing dataflow. 
+ mock := arrowRecordMock.NewMockProducerAPI(ctc.ctrl) + prod := arrowRecord.NewProducer() + + mock.EXPECT().BatchArrowRecordsFromTraces(gomock.Any()).AnyTimes().DoAndReturn( + copyBatch(prod.BatchArrowRecordsFromTraces)) + mock.EXPECT().BatchArrowRecordsFromLogs(gomock.Any()).AnyTimes().DoAndReturn( + copyBatch(prod.BatchArrowRecordsFromLogs)) + mock.EXPECT().BatchArrowRecordsFromMetrics(gomock.Any()).AnyTimes().DoAndReturn( + copyBatch(prod.BatchArrowRecordsFromMetrics)) + mock.EXPECT().Close().Times(1).Return(nil) + return mock + } +} + +func newExporterTestCaseCommon(t zaptest.TestingT, pname PrioritizerName, noisy noisyTest, maxLifetime time.Duration, numStreams int, disableDowngrade bool, metadataFunc func(ctx context.Context) (map[string]string, error)) *exporterTestCase { + ctc := newCommonTestCase(t, noisy) + + if metadataFunc == nil { + ctc.requestMetadataCall.AnyTimes().Return(nil, nil) + } else { + ctc.requestMetadataCall.AnyTimes().DoAndReturn(func(ctx context.Context, _ ...string) (map[string]string, error) { + return metadataFunc(ctx) + }) + } + + exp := NewExporter(maxLifetime, numStreams, pname, disableDowngrade, ctc.telset, nil, mockArrowProducer(ctc), ctc.traceClient, ctc.perRPCCredentials, netstats.Noop{}) + + return &exporterTestCase{ + commonTestCase: ctc, + exporter: exp, + } +} + +func statusOKFor(id int64) *arrowpb.BatchStatus { + return &arrowpb.BatchStatus{ + BatchId: id, + StatusCode: arrowpb.StatusCode_OK, + } +} + +func statusUnavailableFor(id int64) *arrowpb.BatchStatus { + return &arrowpb.BatchStatus{ + BatchId: id, + StatusCode: arrowpb.StatusCode_UNAVAILABLE, + StatusMessage: "test unavailable", + } +} + +func statusInvalidFor(id int64) *arrowpb.BatchStatus { + return &arrowpb.BatchStatus{ + BatchId: id, + StatusCode: arrowpb.StatusCode_INVALID_ARGUMENT, + StatusMessage: "test invalid", + } +} + +func statusUnrecognizedFor(id int64) *arrowpb.BatchStatus { + return &arrowpb.BatchStatus{ + BatchId: id, + StatusCode: 1 << 20, + StatusMessage: "test unrecognized", + } +} + +// TestArrowExporterSuccess tests a single Send through a healthy channel. 
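// The healthy-channel choreography used here and in later tests is: a helper
// goroutine reads one BatchArrowRecords from channel.sendChannel() and replies
// on channel.recv with statusOKFor(batch.BatchId), which completes the
// exporter's SendAndWait call.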
+func TestArrowExporterSuccess(t *testing.T) { + stdTesting := otelAssert.NewStdUnitTest(t) + for _, pname := range AllPrioritizers { + t.Run(string(pname), func(t *testing.T) { + for _, inputData := range []any{twoTraces, twoMetrics, twoLogs} { + t.Run(fmt.Sprintf("%T", inputData), func(t *testing.T) { + tc := newSingleStreamTestCase(t, pname) + channel := newHealthyTestChannel() + + tc.traceCall.Times(1).DoAndReturn(tc.returnNewStream(channel)) + + ctx := context.Background() + require.NoError(t, tc.exporter.Start(ctx)) + + var wg sync.WaitGroup + var outputData *arrowpb.BatchArrowRecords + wg.Add(1) + go func() { + defer wg.Done() + outputData = <-channel.sendChannel() + channel.recv <- statusOKFor(outputData.BatchId) + }() + + sent, err := tc.exporter.SendAndWait(ctx, inputData) + require.NoError(t, err) + require.True(t, sent) + + wg.Wait() + + testCon := arrowRecord.NewConsumer() + switch testData := inputData.(type) { + case ptrace.Traces: + traces, err := testCon.TracesFrom(outputData) + require.NoError(t, err) + require.Equal(t, 1, len(traces)) + otelAssert.Equiv(stdTesting, []json.Marshaler{ + compareJSONTraces{testData}, + }, []json.Marshaler{ + compareJSONTraces{traces[0]}, + }) + case plog.Logs: + logs, err := testCon.LogsFrom(outputData) + require.NoError(t, err) + require.Equal(t, 1, len(logs)) + otelAssert.Equiv(stdTesting, []json.Marshaler{ + compareJSONLogs{testData}, + }, []json.Marshaler{ + compareJSONLogs{logs[0]}, + }) + case pmetric.Metrics: + metrics, err := testCon.MetricsFrom(outputData) + require.NoError(t, err) + require.Equal(t, 1, len(metrics)) + otelAssert.Equiv(stdTesting, []json.Marshaler{ + compareJSONMetrics{testData}, + }, []json.Marshaler{ + compareJSONMetrics{metrics[0]}, + }) + } + + require.NoError(t, tc.exporter.Shutdown(ctx)) + }) + } + }) + } +} + +// TestArrowExporterTimeout tests that single slow Send leads to context canceled. +func TestArrowExporterTimeout(t *testing.T) { + for _, pname := range AllPrioritizers { + t.Run(string(pname), func(t *testing.T) { + tc := newSingleStreamTestCase(t, pname) + channel := newUnresponsiveTestChannel() + + tc.traceCall.Times(1).DoAndReturn(tc.returnNewStream(channel)) + + ctx, cancel := context.WithCancel(context.Background()) + require.NoError(t, tc.exporter.Start(ctx)) + + go func() { + time.Sleep(200 * time.Millisecond) + cancel() + }() + sent, err := tc.exporter.SendAndWait(ctx, twoTraces) + require.True(t, sent) + require.Error(t, err) + require.True(t, errors.Is(err, context.Canceled)) + + require.NoError(t, tc.exporter.Shutdown(ctx)) + }) + } +} + +// TestConnectError tests that if the connetions fail fast the +// stream object for some reason is nil. This causes downgrade. 
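// The exporter logs "cannot start arrow stream" in this case, which the test
// asserts on the observed logs below.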
+func TestArrowExporterStreamConnectError(t *testing.T) { + for _, pname := range AllPrioritizers { + t.Run(string(pname), func(t *testing.T) { + tc := newSingleStreamTestCase(t, pname) + channel := newConnectErrorTestChannel() + + tc.traceCall.AnyTimes().DoAndReturn(tc.returnNewStream(channel)) + + bg := context.Background() + require.NoError(t, tc.exporter.Start(bg)) + + sent, err := tc.exporter.SendAndWait(bg, twoTraces) + require.False(t, sent) + require.NoError(t, err) + + require.NoError(t, tc.exporter.Shutdown(bg)) + + require.Less(t, 0, len(tc.observedLogs.All()), "should have at least one log: %v", tc.observedLogs.All()) + require.Equal(t, tc.observedLogs.All()[0].Message, "cannot start arrow stream") + }) + } +} + +// TestArrowExporterDowngrade tests that if the Recv() returns an +// Unimplemented code (as gRPC does) that the connection is downgraded +// without error. +func TestArrowExporterDowngrade(t *testing.T) { + for _, pname := range AllPrioritizers { + t.Run(string(pname), func(t *testing.T) { + tc := newSingleStreamTestCase(t, pname) + channel := newArrowUnsupportedTestChannel() + + tc.traceCall.AnyTimes().DoAndReturn(tc.returnNewStream(channel)) + + bg := context.Background() + require.NoError(t, tc.exporter.Start(bg)) + + sent, err := tc.exporter.SendAndWait(bg, twoTraces) + require.False(t, sent) + require.NoError(t, err) + + require.NoError(t, tc.exporter.Shutdown(bg)) + + require.Less(t, 1, len(tc.observedLogs.All()), "should have at least two logs: %v", tc.observedLogs.All()) + require.Equal(t, tc.observedLogs.All()[0].Message, "arrow is not supported") + require.Contains(t, tc.observedLogs.All()[1].Message, "downgrading") + }) + } +} + +// TestArrowExporterDisableDowngrade tests that if the Recv() returns +// any error downgrade still does not occur amd that the connection is +// retried without error. +func TestArrowExporterDisableDowngrade(t *testing.T) { + for _, pname := range AllPrioritizers { + t.Run(string(pname), func(t *testing.T) { + tc := newSingleStreamDowngradeDisabledTestCase(t, pname) + badChannel := newArrowUnsupportedTestChannel() + goodChannel := newHealthyTestChannel() + + fails := 0 + tc.traceCall.AnyTimes().DoAndReturn(func(ctx context.Context, opts ...grpc.CallOption) ( + arrowpb.ArrowTracesService_ArrowTracesClient, + error, + ) { + defer func() { fails++ }() + + if fails < 3 { + return tc.returnNewStream(badChannel)(ctx, opts...) + } + return tc.returnNewStream(goodChannel)(ctx, opts...) + }) + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + outputData := <-goodChannel.sendChannel() + goodChannel.recv <- statusOKFor(outputData.BatchId) + }() + + bg := context.Background() + require.NoError(t, tc.exporter.Start(bg)) + + sent, err := tc.exporter.SendAndWait(bg, twoTraces) + require.True(t, sent) + require.NoError(t, err) + + wg.Wait() + + require.NoError(t, tc.exporter.Shutdown(bg)) + + require.Less(t, 1, len(tc.observedLogs.All()), "should have at least two logs: %v", tc.observedLogs.All()) + require.Equal(t, tc.observedLogs.All()[0].Message, "arrow is not supported") + require.NotContains(t, tc.observedLogs.All()[1].Message, "downgrading") + }) + } +} + +// TestArrowExporterConnectTimeout tests that an error is returned to +// the caller if the response does not arrive in time. 
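// The disconnected channel blocks in onConnect() until the caller's context is
// canceled, so SendAndWait surfaces context.Canceled rather than a send result.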
+func TestArrowExporterConnectTimeout(t *testing.T) { + for _, pname := range AllPrioritizers { + t.Run(string(pname), func(t *testing.T) { + tc := newSingleStreamTestCase(t, pname) + channel := newDisconnectedTestChannel() + + tc.traceCall.AnyTimes().DoAndReturn(tc.returnNewStream(channel)) + + bg := context.Background() + ctx, cancel := context.WithCancel(bg) + require.NoError(t, tc.exporter.Start(bg)) + + go func() { + time.Sleep(200 * time.Millisecond) + cancel() + }() + _, err := tc.exporter.SendAndWait(ctx, twoTraces) + require.Error(t, err) + require.True(t, errors.Is(err, context.Canceled)) + + require.NoError(t, tc.exporter.Shutdown(bg)) + }) + } +} + +// TestArrowExporterStreamFailure tests that a single stream failure +// followed by a healthy stream. +func TestArrowExporterStreamFailure(t *testing.T) { + for _, pname := range AllPrioritizers { + t.Run(string(pname), func(t *testing.T) { + tc := newSingleStreamTestCase(t, pname) + channel0 := newUnresponsiveTestChannel() + channel1 := newHealthyTestChannel() + + tc.traceCall.AnyTimes().DoAndReturn(tc.returnNewStream(channel0, channel1)) + + bg := context.Background() + require.NoError(t, tc.exporter.Start(bg)) + + go func() { + time.Sleep(200 * time.Millisecond) + channel0.unblock() + }() + + var wg sync.WaitGroup + var outputData *arrowpb.BatchArrowRecords + wg.Add(1) + go func() { + defer wg.Done() + outputData = <-channel1.sendChannel() + channel1.recv <- statusOKFor(outputData.BatchId) + }() + + sent, err := tc.exporter.SendAndWait(bg, twoTraces) + require.NoError(t, err) + require.True(t, sent) + + wg.Wait() + + require.NoError(t, tc.exporter.Shutdown(bg)) + }) + } +} + +// TestArrowExporterStreamRace reproduces the situation needed for a +// race between stream send and stream cancel, causing it to fully +// exercise the removeReady() code path. +func TestArrowExporterStreamRace(t *testing.T) { + // This creates the conditions likely to produce a + // stream race in prioritizer.go. + tc := newExporterNoisyTestCase(t, 20) + + var tries atomic.Int32 + + tc.traceCall.AnyTimes().DoAndReturn(tc.repeatedNewStream(func() testChannel { + noResponse := newUnresponsiveTestChannel() + // Immediately unblock to return the EOF to the stream + // receiver and shut down the stream. + go noResponse.unblock() + tries.Add(1) + return noResponse + })) + + var wg sync.WaitGroup + + bg := context.Background() + require.NoError(t, tc.exporter.Start(bg)) + + callctx, cancel := context.WithCancel(bg) + + // These goroutines will repeatedly try for an available + // stream, but none will become available. Eventually the + // context will be canceled and cause these goroutines to + // return. + for i := 0; i < 5; i++ { + wg.Add(1) + go func() { + defer wg.Done() + // This blocks until the cancelation. + _, err := tc.exporter.SendAndWait(callctx, twoTraces) + require.Error(t, err) + require.True(t, errors.Is(err, context.Canceled)) + }() + } + + // Wait until 1000 streams have started. + assert.Eventually(t, func() bool { + return tries.Load() >= 1000 + }, 10*time.Second, 5*time.Millisecond) + + cancel() + wg.Wait() + require.NoError(t, tc.exporter.Shutdown(bg)) +} + +// TestArrowExporterStreaming tests 10 sends in a row. 
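// Each send round-trips through the same healthy channel; the consumer
// goroutine decodes every batch with arrowRecord.NewConsumer() and the test
// compares the decoded traces, in order, against the inputs.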
+func TestArrowExporterStreaming(t *testing.T) { + for _, pname := range AllPrioritizers { + t.Run(string(pname), func(t *testing.T) { + tc := newSingleStreamTestCase(t, pname) + channel := newHealthyTestChannel() + + tc.traceCall.AnyTimes().DoAndReturn(tc.returnNewStream(channel)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + require.NoError(t, tc.exporter.Start(ctx)) + + var expectOutput []ptrace.Traces + var actualOutput []ptrace.Traces + testCon := arrowRecord.NewConsumer() + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + for data := range channel.sendChannel() { + traces, err := testCon.TracesFrom(data) + require.NoError(t, err) + require.Equal(t, 1, len(traces)) + actualOutput = append(actualOutput, traces[0]) + channel.recv <- statusOKFor(data.BatchId) + } + }() + + for times := 0; times < 10; times++ { + input := testdata.GenerateTraces(2) + + sent, err := tc.exporter.SendAndWait(context.Background(), input) + require.NoError(t, err) + require.True(t, sent) + + expectOutput = append(expectOutput, input) + } + // Stop the test conduit started above. + cancel() + wg.Wait() + + // As this equality check doesn't support out of order slices, + // we sort the slices directly in the GenerateTraces function. + require.Equal(t, expectOutput, actualOutput) + require.NoError(t, tc.exporter.Shutdown(ctx)) + }) + } +} + +// TestArrowExporterHeaders tests a mix of outgoing context headers. +func TestArrowExporterHeaders(t *testing.T) { + tc := newSingleStreamMetadataTestCase(t) + channel := newHealthyTestChannel() + + tc.traceCall.AnyTimes().DoAndReturn(tc.returnNewStream(channel)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + require.NoError(t, tc.exporter.Start(ctx)) + + var expectOutput []metadata.MD + var actualOutput []metadata.MD + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + md := metadata.MD{} + hpd := hpack.NewDecoder(4096, func(f hpack.HeaderField) { + md[f.Name] = append(md[f.Name], f.Value) + }) + for data := range channel.sendChannel() { + if len(data.Headers) == 0 { + actualOutput = append(actualOutput, nil) + } else { + _, err := hpd.Write(data.Headers) + require.NoError(t, err) + actualOutput = append(actualOutput, md) + md = metadata.MD{} + } + channel.recv <- statusOKFor(data.BatchId) + } + }() + + for times := 0; times < 10; times++ { + input := testdata.GenerateTraces(2) + + if times%2 == 1 { + md := metadata.MD{ + "expected1": []string{"metadata1"}, + "expected2": []string{fmt.Sprint(times)}, + "otlp-pdata-size": []string{"329"}, + } + expectOutput = append(expectOutput, md) + } else { + expectOutput = append(expectOutput, metadata.MD{ + "otlp-pdata-size": []string{"329"}, + }) + } + + sent, err := tc.exporter.SendAndWait(context.Background(), input) + require.NoError(t, err) + require.True(t, sent) + } + // Stop the test conduit started above. + cancel() + wg.Wait() + + require.Equal(t, expectOutput, actualOutput) + require.NoError(t, tc.exporter.Shutdown(ctx)) +} + +// TestArrowExporterIsTraced tests whether trace and span ID are +// propagated. 
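// The batch headers are hpack-encoded on the exporter side, so the test
// decodes them with golang.org/x/net/http2/hpack and compares the resulting
// metadata, including the traceparent produced by
// propagation.TraceContext{}.Inject, against the expected values.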
+func TestArrowExporterIsTraced(t *testing.T) { + otel.SetTextMapPropagator(propagation.TraceContext{}) + + for _, pname := range AllPrioritizers { + t.Run(string(pname), func(t *testing.T) { + tc := newSingleStreamTestCase(t, pname) + channel := newHealthyTestChannel() + + tc.traceCall.AnyTimes().DoAndReturn(tc.returnNewStream(channel)) + + ctx, cancel := context.WithCancel(context.Background()) + require.NoError(t, tc.exporter.Start(ctx)) + + var expectOutput []metadata.MD + var actualOutput []metadata.MD + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + md := metadata.MD{} + hpd := hpack.NewDecoder(4096, func(f hpack.HeaderField) { + md[f.Name] = append(md[f.Name], f.Value) + }) + for data := range channel.sendChannel() { + if len(data.Headers) == 0 { + actualOutput = append(actualOutput, nil) + } else { + _, err := hpd.Write(data.Headers) + require.NoError(t, err) + actualOutput = append(actualOutput, md) + md = metadata.MD{} + } + channel.recv <- statusOKFor(data.BatchId) + } + }() + + for times := 0; times < 10; times++ { + input := testdata.GenerateTraces(2) + callCtx := context.Background() + + if times%2 == 1 { + callCtx = trace.ContextWithSpanContext(callCtx, + trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: [16]byte{byte(times), 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf}, + SpanID: [8]byte{byte(times), 1, 2, 3, 4, 5, 6, 7}, + }), + ) + expectMap := map[string]string{} + propagation.TraceContext{}.Inject(callCtx, propagation.MapCarrier(expectMap)) + + md := metadata.MD{ + "traceparent": []string{expectMap["traceparent"]}, + "otlp-pdata-size": []string{"329"}, + } + expectOutput = append(expectOutput, md) + } else { + expectOutput = append(expectOutput, metadata.MD{ + "otlp-pdata-size": []string{"329"}, + }) + } + + sent, err := tc.exporter.SendAndWait(callCtx, input) + require.NoError(t, err) + require.True(t, sent) + } + // Stop the test conduit started above. + cancel() + wg.Wait() + + require.Equal(t, expectOutput, actualOutput) + require.NoError(t, tc.exporter.Shutdown(ctx)) + }) + } +} + +func TestAddJitter(t *testing.T) { + require.Equal(t, time.Duration(0), addJitter(0)) + + // Expect no more than 5% less in each trial. + for i := 0; i < 100; i++ { + x := addJitter(20 * time.Minute) + require.LessOrEqual(t, 19*time.Minute, x) + require.Less(t, x, 20*time.Minute) + } +} + +// TestArrowExporterStreamLifetimeAndShutdown exercises multiple +// stream lifetimes and then shuts down, inspects the logs for +// legibility. 
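+// With the short max_stream_lifetime used by this test case, the send loop
+// below rotates through several streams; the final assertion on the observed
+// logs checks that each rotation happens quietly.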
+func TestArrowExporterStreamLifetimeAndShutdown(t *testing.T) { + for _, pname := range AllPrioritizers { + t.Run(string(pname), func(t *testing.T) { + for _, numStreams := range []int{1, 2, 8} { + t.Run(fmt.Sprint(numStreams), func(t *testing.T) { + tc := newShortLifetimeStreamTestCase(t, pname, numStreams) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var wg sync.WaitGroup + + var expectCount uint64 + var actualCount uint64 + + tc.traceCall.AnyTimes().DoAndReturn(func(ctx context.Context, opts ...grpc.CallOption) ( + arrowpb.ArrowTracesService_ArrowTracesClient, + error, + ) { + wg.Add(1) + channel := newHealthyTestChannel() + + go func() { + defer wg.Done() + testCon := arrowRecord.NewConsumer() + + for data := range channel.sendChannel() { + traces, err := testCon.TracesFrom(data) + require.NoError(t, err) + require.Equal(t, 1, len(traces)) + atomic.AddUint64(&actualCount, 1) + channel.recv <- statusOKFor(data.BatchId) + } + + // Closing the recv channel causes the exporter to see EOF. + close(channel.recv) + }() + + return tc.returnNewStream(channel)(ctx, opts...) + }) + + require.NoError(t, tc.exporter.Start(ctx)) + + start := time.Now() + // This is 10 stream lifetimes using the "ShortLifetime" test. + for time.Since(start) < 5*time.Second { + input := testdata.GenerateTraces(2) + + sent, err := tc.exporter.SendAndWait(ctx, input) + require.NoError(t, err) + require.True(t, sent) + + expectCount++ + } + + require.NoError(t, tc.exporter.Shutdown(ctx)) + + require.Equal(t, expectCount, actualCount) + + cancel() + wg.Wait() + + require.Empty(t, tc.observedLogs.All()) + }) + } + }) + } +} + +func BenchmarkLeastLoadedTwo4(b *testing.B) { + benchmarkPrioritizer(b, 4, LeastLoadedTwoPrioritizer) +} + +func BenchmarkLeastLoadedTwo8(b *testing.B) { + benchmarkPrioritizer(b, 8, LeastLoadedTwoPrioritizer) +} + +func BenchmarkLeastLoadedTwo16(b *testing.B) { + benchmarkPrioritizer(b, 16, LeastLoadedTwoPrioritizer) +} + +func BenchmarkLeastLoadedTwo32(b *testing.B) { + benchmarkPrioritizer(b, 32, LeastLoadedTwoPrioritizer) +} + +func BenchmarkLeastLoadedTwo64(b *testing.B) { + benchmarkPrioritizer(b, 64, LeastLoadedTwoPrioritizer) +} + +func BenchmarkLeastLoadedTwo128(b *testing.B) { + benchmarkPrioritizer(b, 128, LeastLoadedTwoPrioritizer) +} + +func BenchmarkLeastLoadedFour4(b *testing.B) { + benchmarkPrioritizer(b, 4, LeastLoadedFourPrioritizer) +} + +func BenchmarkLeastLoadedFour8(b *testing.B) { + benchmarkPrioritizer(b, 8, LeastLoadedFourPrioritizer) +} + +func BenchmarkLeastLoadedFour16(b *testing.B) { + benchmarkPrioritizer(b, 16, LeastLoadedFourPrioritizer) +} + +func BenchmarkLeastLoadedFour32(b *testing.B) { + benchmarkPrioritizer(b, 32, LeastLoadedFourPrioritizer) +} + +func BenchmarkLeastLoadedFour64(b *testing.B) { + benchmarkPrioritizer(b, 64, LeastLoadedFourPrioritizer) +} + +func BenchmarkLeastLoadedFour128(b *testing.B) { + benchmarkPrioritizer(b, 128, LeastLoadedFourPrioritizer) +} + +func benchmarkPrioritizer(b *testing.B, numStreams int, pname PrioritizerName) { + tc := newExporterTestCaseCommon(z2m{b}, pname, Noisy, defaultMaxStreamLifetime, numStreams, true, nil) + + var wg sync.WaitGroup + var cnt atomic.Int32 + + tc.traceCall.AnyTimes().DoAndReturn(func(ctx context.Context, opts ...grpc.CallOption) ( + arrowpb.ArrowTracesService_ArrowTracesClient, + error, + ) { + wg.Add(1) + num := cnt.Add(1) + channel := newHealthyTestChannel() + + delay := time.Duration(num) * time.Millisecond + + go func() { + defer wg.Done() + var mine sync.WaitGroup 
+ for data := range channel.sendChannel() { + mine.Add(1) + go func(<-chan time.Time) { + defer mine.Done() + channel.recv <- statusOKFor(data.BatchId) + }(time.After(delay)) + } + + mine.Wait() + + close(channel.recv) + }() + + return tc.returnNewStream(channel)(ctx, opts...) + }) + + bg, cancel := context.WithCancel(context.Background()) + defer cancel() + if err := tc.exporter.Start(bg); err != nil { + b.Errorf("start failed: %v", err) + return + } + + input := testdata.GenerateTraces(2) + + wg.Add(1) + defer func() { + if err := tc.exporter.Shutdown(bg); err != nil { + b.Errorf("shutdown failed: %v", err) + } + wg.Done() + wg.Wait() + }() + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + sent, err := tc.exporter.SendAndWait(bg, input) + if err != nil || !sent { + b.Errorf("send failed: %v: %v", sent, err) + } + } +} diff --git a/exporter/otelarrowexporter/internal/arrow/grpcmock/credentials.go b/exporter/otelarrowexporter/internal/arrow/grpcmock/credentials.go new file mode 100644 index 000000000000..c9ddc953e5b1 --- /dev/null +++ b/exporter/otelarrowexporter/internal/arrow/grpcmock/credentials.go @@ -0,0 +1,74 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: google.golang.org/grpc/credentials (interfaces: PerRPCCredentials) +// +// Generated by this command: +// +// mockgen -package grpcmock google.golang.org/grpc/credentials PerRPCCredentials +// + +// Package grpcmock is a generated GoMock package. +package grpcmock + +import ( + context "context" + reflect "reflect" + + gomock "go.uber.org/mock/gomock" +) + +// MockPerRPCCredentials is a mock of PerRPCCredentials interface. +type MockPerRPCCredentials struct { + ctrl *gomock.Controller + recorder *MockPerRPCCredentialsMockRecorder +} + +// MockPerRPCCredentialsMockRecorder is the mock recorder for MockPerRPCCredentials. +type MockPerRPCCredentialsMockRecorder struct { + mock *MockPerRPCCredentials +} + +// NewMockPerRPCCredentials creates a new mock instance. +func NewMockPerRPCCredentials(ctrl *gomock.Controller) *MockPerRPCCredentials { + mock := &MockPerRPCCredentials{ctrl: ctrl} + mock.recorder = &MockPerRPCCredentialsMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockPerRPCCredentials) EXPECT() *MockPerRPCCredentialsMockRecorder { + return m.recorder +} + +// GetRequestMetadata mocks base method. +func (m *MockPerRPCCredentials) GetRequestMetadata(arg0 context.Context, arg1 ...string) (map[string]string, error) { + m.ctrl.T.Helper() + varargs := []any{arg0} + for _, a := range arg1 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetRequestMetadata", varargs...) + ret0, _ := ret[0].(map[string]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetRequestMetadata indicates an expected call of GetRequestMetadata. +func (mr *MockPerRPCCredentialsMockRecorder) GetRequestMetadata(arg0 any, arg1 ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{arg0}, arg1...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRequestMetadata", reflect.TypeOf((*MockPerRPCCredentials)(nil).GetRequestMetadata), varargs...) +} + +// RequireTransportSecurity mocks base method. +func (m *MockPerRPCCredentials) RequireTransportSecurity() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RequireTransportSecurity") + ret0, _ := ret[0].(bool) + return ret0 +} + +// RequireTransportSecurity indicates an expected call of RequireTransportSecurity. 
+func (mr *MockPerRPCCredentialsMockRecorder) RequireTransportSecurity() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RequireTransportSecurity", reflect.TypeOf((*MockPerRPCCredentials)(nil).RequireTransportSecurity)) +} diff --git a/exporter/otelarrowexporter/internal/arrow/prioritizer.go b/exporter/otelarrowexporter/internal/arrow/prioritizer.go new file mode 100644 index 000000000000..84220338348f --- /dev/null +++ b/exporter/otelarrowexporter/internal/arrow/prioritizer.go @@ -0,0 +1,107 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package arrow // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/otelarrowexporter/internal/arrow" + +import ( + "context" + "fmt" + "strconv" + "strings" + + "go.opentelemetry.io/collector/component" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var ErrStreamRestarting = status.Error(codes.Aborted, "stream is restarting") + +type PrioritizerName string + +var _ component.ConfigValidator = PrioritizerName("") + +const ( + DefaultPrioritizer PrioritizerName = LeastLoadedPrioritizer + LeastLoadedPrioritizer PrioritizerName = llPrefix + LeastLoadedTwoPrioritizer PrioritizerName = llPrefix + "2" + LeastLoadedFourPrioritizer PrioritizerName = llPrefix + "4" + unsetPrioritizer PrioritizerName = "" + + llPrefix = "leastloaded" +) + +// streamPrioritizer is an interface for prioritizing multiple +// streams. +type streamPrioritizer interface { + // nextWriter gets the next stream writer. In case the exporter + // was downgraded, returns nil. + nextWriter() streamWriter + + // downgrade is called with the root context of the exporter, + // and may block indefinitely. this allows the prioritizer to + // drain its channel(s) until the exporter shuts down. + downgrade(context.Context) +} + +// streamWriter is the caller's interface to a stream. +type streamWriter interface { + // sendAndWait is called to begin a write. After completing + // the call, wait on writeItem.errCh for the response. + sendAndWait(context.Context, <-chan error, writeItem) error +} + +func newStreamPrioritizer(dc doneCancel, name PrioritizerName, numStreams int) (streamPrioritizer, []*streamWorkState) { + if name == unsetPrioritizer { + name = DefaultPrioritizer + } + if strings.HasPrefix(string(name), llPrefix) { + // error was checked and reported in Validate + n, err := strconv.Atoi(string(name[len(llPrefix):])) + if err == nil { + return newBestOfNPrioritizer(dc, n, numStreams, pendingRequests) + } + } + return newBestOfNPrioritizer(dc, numStreams, numStreams, pendingRequests) +} + +// pendingRequests is the load function used by leastloadedN. 
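+// The load of a stream is the number of batches still waiting on a server
+// response plus the number of queued-but-unwritten items, so an idle stream
+// scores zero. Plain "leastloaded" compares every stream, while
+// "leastloaded2"/"leastloaded4" compare only 2 or 4 candidates per pick.
+// A hypothetical exporter configuration selecting the latter could look
+// roughly like this (YAML key name assumed from the Prioritizer field):
+//
+//	arrow:
+//	  prioritizer: leastloaded4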
+func pendingRequests(sws *streamWorkState) float64 { + sws.lock.Lock() + defer sws.lock.Unlock() + return float64(len(sws.waiters) + len(sws.toWrite)) +} + +// Validate implements component.ConfigValidator +func (p PrioritizerName) Validate() error { + switch p { + // Exact match cases + case LeastLoadedPrioritizer, unsetPrioritizer: + return nil + } + // "leastloadedN" cases + if !strings.HasPrefix(string(p), llPrefix) { + return fmt.Errorf("unrecognized prioritizer: %q", string(p)) + } + _, err := strconv.Atoi(string(p[len(llPrefix):])) + if err != nil { + return fmt.Errorf("invalid prioritizer: %q", string(p)) + } + return nil +} + +// drain helps avoid a race condition when downgrade happens, it ensures that +// any late-arriving work will immediately see ErrStreamRestarting, and this +// continues until the exporter shuts down. +// +// Note: the downgrade function is a major source of complexity and it is +// probably best removed, instead of having this level of complexity. +func drain(ch <-chan writeItem, done <-chan struct{}) { + for { + select { + case <-done: + return + case item := <-ch: + item.errCh <- ErrStreamRestarting + } + } +} diff --git a/exporter/otelarrowexporter/internal/arrow/stream.go b/exporter/otelarrowexporter/internal/arrow/stream.go new file mode 100644 index 000000000000..7070d8c6ea42 --- /dev/null +++ b/exporter/otelarrowexporter/internal/arrow/stream.go @@ -0,0 +1,477 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package arrow // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/otelarrowexporter/internal/arrow" + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "sync" + "time" + + arrowpb "github.com/open-telemetry/otel-arrow/api/experimental/arrow/v1" + "github.com/open-telemetry/otel-arrow/collector/netstats" + arrowRecord "github.com/open-telemetry/otel-arrow/pkg/otel/arrow_record" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" + "go.opentelemetry.io/otel" + otelcodes "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" + "go.uber.org/multierr" + "go.uber.org/zap" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// Stream is 1:1 with gRPC stream. +type Stream struct { + // maxStreamLifetime is the max timeout before stream + // should be closed on the client side. This ensures a + // graceful shutdown before max_connection_age is reached + // on the server side. + maxStreamLifetime time.Duration + + // producer is exclusive to the holder of the stream. + producer arrowRecord.ProducerAPI + + // prioritizer has a reference to the stream, this allows it to be severed. + prioritizer streamPrioritizer + + // telemetry are a copy of the exporter's telemetry settings + telemetry component.TelemetrySettings + + // tracer is used to create a span describing the export. + tracer trace.Tracer + + // client uses the exporter's grpc.ClientConn. this is + // initially nil only set when ArrowStream() calls meaning the + // endpoint recognizes OTel-Arrow. + client AnyStreamClient + + // method the gRPC method name, used for additional instrumentation. + method string + + // netReporter provides network-level metrics. 
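+	// Uncompressed request sizes are counted directly in encodeAndSend,
+	// since the uncompressed size is unreliable for the Arrow transport.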
+ netReporter netstats.Interface + + // streamWorkState is the interface to prioritizer/balancer, contains + // outstanding request (by batch ID) and the write channel used by + // the stream. All of this state will be inherited by the successor + // stream. + workState *streamWorkState +} + +// streamWorkState contains the state assigned to an Arrow stream. When +// a stream shuts down, the work state is handed to the replacement stream. +type streamWorkState struct { + // toWrite is used to pass pending data between a caller, the + // prioritizer and a stream. + toWrite chan writeItem + + // lock protects waiters + lock sync.Mutex + + // waiters is the response channel for each active batch. + waiters map[int64]chan<- error +} + +// writeItem is passed from the sender (a pipeline consumer) to the +// stream writer, which is not bound by the sender's context. +type writeItem struct { + // records is a ptrace.Traces, plog.Logs, or pmetric.Metrics + records any + // md is the caller's metadata, derived from its context. + md map[string]string + // errCh is used by the stream reader to unblock the sender + // to the stream side, this is a `chan<-`. to the send side, + // this is a `<-chan`. + errCh chan<- error + // uncompSize is computed by the appropriate sizer (in the + // caller's goroutine) + uncompSize int + // producerCtx is used for tracing purposes. + producerCtx context.Context +} + +// newStream constructs a stream +func newStream( + producer arrowRecord.ProducerAPI, + prioritizer streamPrioritizer, + telemetry component.TelemetrySettings, + netReporter netstats.Interface, + workState *streamWorkState, +) *Stream { + tracer := telemetry.TracerProvider.Tracer("otel-arrow-exporter") + return &Stream{ + producer: producer, + prioritizer: prioritizer, + telemetry: telemetry, + tracer: tracer, + netReporter: netReporter, + workState: workState, + } +} + +// setBatchChannel places a waiting consumer's batchID into the waiters map, where +// the stream reader may find it. +func (s *Stream) setBatchChannel(batchID int64, errCh chan<- error) { + s.workState.lock.Lock() + defer s.workState.lock.Unlock() + + s.workState.waiters[batchID] = errCh +} + +// logStreamError decides how to log an error. `which` indicates the +// stream direction, will be "reader" or "writer". +func (s *Stream) logStreamError(which string, err error) { + var code codes.Code + var msg string + // gRPC tends to supply status-wrapped errors, so we always + // unpack them. A wrapped Canceled code indicates intentional + // shutdown, which can be due to normal causes (EOF, e.g., + // max-stream-lifetime reached) or unusual causes (Canceled, + // e.g., because the other stream direction reached an error). + if status, ok := status.FromError(err); ok { + code = status.Code() + msg = status.Message() + } else if errors.Is(err, io.EOF) || errors.Is(err, context.Canceled) { + code = codes.Canceled + msg = err.Error() + } else { + code = codes.Internal + msg = err.Error() + } + if code == codes.Canceled { + s.telemetry.Logger.Debug("arrow stream shutdown", zap.String("which", which), zap.String("message", msg)) + } else { + s.telemetry.Logger.Error("arrow stream error", zap.String("which", which), zap.String("message", msg), zap.Int("code", int(code))) + } +} + +// run blocks the calling goroutine while executing stream logic. run +// will return when the reader and writer are finished. errors will be logged. 
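+//
+// As a rough sketch of the flow implemented below:
+//
+//	sc, method, err := streamClient(ctx, ...) // open one gRPC stream
+//	go s.write(ctx)                           // writer drains workState.toWrite
+//	s.read(ctx)                               // reader matches BatchStatus to waiters
+//	// afterwards: cancel, wait for the writer, then fail any outstanding
+//	// waiters with ErrStreamRestarting so callers can retry on a new stream.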
+func (s *Stream) run(ctx context.Context, dc doneCancel, streamClient StreamClientFunc, grpcOptions []grpc.CallOption) { + sc, method, err := streamClient(ctx, grpcOptions...) + if err != nil { + // Returning with stream.client == nil signals the + // lack of an Arrow stream endpoint. When all the + // streams return with .client == nil, the ready + // channel will be closed, which causes downgrade. + // + // Note: These are gRPC server internal errors and + // will cause downgrade to standard OTLP. These + // cannot be simulated by connecting to a gRPC server + // that does not support the ArrowStream service, with + // or without the WaitForReady flag set. In a real + // gRPC server the first Unimplemented code is + // generally delivered to the Recv() call below, so + // this code path is not taken for an ordinary downgrade. + s.telemetry.Logger.Error("cannot start arrow stream", zap.Error(err)) + return + } + // Setting .client != nil indicates that the endpoint was valid, + // streaming may start. When this stream finishes, it will be + // restarted. + s.method = method + s.client = sc + + // ww is used to wait for the writer. Since we wait for the writer, + // the writer's goroutine is not added to exporter waitgroup (e.wg). + var ww sync.WaitGroup + + var writeErr error + ww.Add(1) + go func() { + defer ww.Done() + writeErr = s.write(ctx) + if writeErr != nil { + dc.cancel() + } + }() + + // the result from read() is processed after cancel and wait, + // so we can set s.client = nil in case of a delayed Unimplemented. + err = s.read(ctx) + + // Wait for the writer to ensure that all waiters are known. + dc.cancel() + ww.Wait() + + if err != nil { + // This branch is reached with an unimplemented status + // with or without the WaitForReady flag. + if status, ok := status.FromError(err); ok && status.Code() == codes.Unimplemented { + // This (client == nil) signals the controller to + // downgrade when all streams have returned in that + // status. + // + // This is a special case because we reset s.client, + // which sets up a downgrade after the streams return. + s.client = nil + s.telemetry.Logger.Info("arrow is not supported", + zap.String("message", status.Message()), + ) + } else { + // All other cases, use the standard log handler. + s.logStreamError("reader", err) + } + } + if writeErr != nil { + s.logStreamError("writer", writeErr) + } + + s.workState.lock.Lock() + defer s.workState.lock.Unlock() + + // The reader and writer have both finished; respond to any + // outstanding waiters. + for _, ch := range s.workState.waiters { + // Note: the top-level OTLP exporter will retry. + ch <- ErrStreamRestarting + } + + s.workState.waiters = map[int64]chan<- error{} +} + +// write repeatedly places this stream into the next-available queue, then +// performs a blocking send(). This returns when the data is in the write buffer, +// the caller waiting on its error channel. +func (s *Stream) write(ctx context.Context) (retErr error) { + // always close the send channel when this function returns. + defer func() { _ = s.client.CloseSend() }() + + // headers are encoding using hpack, reusing a buffer on each call. + var hdrsBuf bytes.Buffer + hdrsEnc := hpack.NewEncoder(&hdrsBuf) + + var timerCh <-chan time.Time + if s.maxStreamLifetime != 0 { + timer := time.NewTimer(s.maxStreamLifetime) + timerCh = timer.C + defer timer.Stop() + } + + for { + // this can block, and if the context is canceled we + // wait for the reader to find this stream. 
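+		// Three ways out of the select below: the lifetime timer fires
+		// (return nil, letting the deferred CloseSend run), a writeItem
+		// arrives and is encoded and sent, or the stream context ends.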
+		var wri writeItem
+		select {
+		case <-timerCh:
+			return nil
+		case wri = <-s.workState.toWrite:
+		case <-ctx.Done():
+			return ctx.Err()
+		}
+
+		err := s.encodeAndSend(wri, &hdrsBuf, hdrsEnc)
+		if err != nil {
+			// Note: For the return statement below, there is no potential
+			// sender race because the stream is not available, as indicated by
+			// the successful <-stream.toWrite above
+			return err
+		}
+	}
+}
+
+func (s *Stream) encodeAndSend(wri writeItem, hdrsBuf *bytes.Buffer, hdrsEnc *hpack.Encoder) (retErr error) {
+	ctx, span := s.tracer.Start(wri.producerCtx, "otel_arrow_stream_send")
+	defer span.End()
+
+	defer func() {
+		// Set span status if an error is returned.
+		if retErr != nil {
+			span := trace.SpanFromContext(ctx)
+			span.SetStatus(otelcodes.Error, retErr.Error())
+		}
+	}()
+	// Get the global propagator, to inject context. When there
+	// are no fields, it's a no-op propagator implementation and
+	// we can skip the allocations inside this block.
+	prop := otel.GetTextMapPropagator()
+
+	if len(prop.Fields()) > 0 {
+		// When the incoming context carries nothing, the map
+		// will be nil. Allocate, if necessary.
+		if wri.md == nil {
+			wri.md = map[string]string{}
+		}
+		// Use the global propagator to inject trace context. Note that
+		// OpenTelemetry Collector will set a global propagator from the
+		// service::telemetry::traces configuration.
+		otel.GetTextMapPropagator().Inject(ctx, propagation.MapCarrier(wri.md))
+	}
+
+	batch, err := s.encode(wri.records)
+	if err != nil {
+		// This is some kind of internal error. We will restart the
+		// stream and mark this record as a permanent one.
+		err = fmt.Errorf("encode: %w", err)
+		wri.errCh <- consumererror.NewPermanent(err)
+		return err
+	}
+
+	// Optionally include outgoing metadata, if present.
+	if len(wri.md) != 0 {
+		hdrsBuf.Reset()
+		for key, val := range wri.md {
+			err := hdrsEnc.WriteField(hpack.HeaderField{
+				Name:  key,
+				Value: val,
+			})
+			if err != nil {
+				// This case is like the encode-failure case
+				// above, we will restart the stream but consider
+				// this a permanent error.
+				err = fmt.Errorf("hpack: %w", err)
+				wri.errCh <- consumererror.NewPermanent(err)
+				return err
+			}
+		}
+		batch.Headers = hdrsBuf.Bytes()
+	}
+
+	// Let the receiver know what to look for.
+	s.setBatchChannel(batch.BatchId, wri.errCh)
+
+	// The netstats code knows that uncompressed size is
+	// unreliable for arrow transport, so we instrument it
+	// directly here. Only the primary direction of transport
+	// is instrumented this way.
+	if wri.uncompSize != 0 {
+		var sized netstats.SizesStruct
+		sized.Method = s.method
+		sized.Length = int64(wri.uncompSize)
+		s.netReporter.CountSend(ctx, sized)
+		s.netReporter.SetSpanSizeAttributes(ctx, sized)
+	}
+
+	if err := s.client.Send(batch); err != nil {
+		// The error will be sent to errCh during cleanup for this stream.
+		// Note: do not wrap this error, it may contain a Status.
+		return err
+	}
+
+	return nil
+}
+
+// read repeatedly reads a batch status and releases the consumers waiting for
+// a response.
+func (s *Stream) read(_ context.Context) error {
+	// Note we do not use the context, the stream context might
+	// cancel a call to Recv() but the call to processBatchStatus
+	// is non-blocking.
+	for {
+		// Note: if the client has called CloseSend() and is waiting for a response from the server.
+		// And if the server fails for some reason, we will wait until some other condition, such as a context
+		// timeout. TODO: possibly, improve to wait for no outstanding requests and then stop reading.
+		resp, err := s.client.Recv()
+		if err != nil {
+			// Note: do not wrap, contains a Status.
+			return err
+		}
+
+		if err = s.processBatchStatus(resp); err != nil {
+			return fmt.Errorf("process: %w", err)
+		}
+	}
+}
+
+// getSenderChannel takes the stream lock and removes the corresponding
+// sender channel.
+func (sws *streamWorkState) getSenderChannel(status *arrowpb.BatchStatus) (chan<- error, error) {
+	sws.lock.Lock()
+	defer sws.lock.Unlock()
+
+	ch, ok := sws.waiters[status.BatchId]
+	if !ok {
+		// Will break the stream.
+		return nil, fmt.Errorf("unrecognized batch ID: %d", status.BatchId)
+	}
+
+	delete(sws.waiters, status.BatchId)
+	return ch, nil
+}
+
+// processBatchStatus processes a single response from the server and unblocks the
+// associated sender.
+func (s *Stream) processBatchStatus(ss *arrowpb.BatchStatus) error {
+	ch, ret := s.workState.getSenderChannel(ss)
+
+	if ch == nil {
+		// In case getSenderChannel encounters a problem, the
+		// channel is nil.
+		return ret
+	}
+
+	if ss.StatusCode == arrowpb.StatusCode_OK {
+		ch <- nil
+		return nil
+	}
+	// See ../../otelarrow.go's `shouldRetry()` method, the retry
+	// behavior described here is achieved there by setting these
+	// recognized codes.
+	var err error
+	switch ss.StatusCode {
+	case arrowpb.StatusCode_UNAVAILABLE:
+		// Retryable
+		err = status.Errorf(codes.Unavailable, "destination unavailable: %d: %s", ss.BatchId, ss.StatusMessage)
+	case arrowpb.StatusCode_INVALID_ARGUMENT:
+		// Not retryable
+		err = status.Errorf(codes.InvalidArgument, "invalid argument: %d: %s", ss.BatchId, ss.StatusMessage)
+	case arrowpb.StatusCode_RESOURCE_EXHAUSTED:
+		// Retry behavior is configurable
+		err = status.Errorf(codes.ResourceExhausted, "resource exhausted: %d: %s", ss.BatchId, ss.StatusMessage)
+	default:
+		// Note: a Canceled StatusCode was once returned by receivers following
+		// a CloseSend() from the exporter. This is now handled using error
+		// status codes. If an exporter is upgraded before a receiver, the exporter
+		// will log this error when the receiver closes streams.
+
+		// Unrecognized status code.
+		err = status.Errorf(codes.Internal, "unexpected stream response: %d: %s", ss.BatchId, ss.StatusMessage)
+
+		// Will break the stream.
+		ret = multierr.Append(ret, err)
+	}
+	ch <- err
+	return ret
+}
+
+// encode produces the next batch of Arrow records.
+func (s *Stream) encode(records any) (_ *arrowpb.BatchArrowRecords, retErr error) {
+	// Defensively, protect against panics in the Arrow producer function.
+	defer func() {
+		if err := recover(); err != nil {
+			// When this happens, the stacktrace is
+			// important and lost if we don't capture it
+			// here.
+ s.telemetry.Logger.Debug("panic detail in otel-arrow-adapter", + zap.Reflect("recovered", err), + zap.Stack("stacktrace"), + ) + retErr = fmt.Errorf("panic in otel-arrow-adapter: %v", err) + } + }() + var batch *arrowpb.BatchArrowRecords + var err error + switch data := records.(type) { + case ptrace.Traces: + batch, err = s.producer.BatchArrowRecordsFromTraces(data) + case plog.Logs: + batch, err = s.producer.BatchArrowRecordsFromLogs(data) + case pmetric.Metrics: + batch, err = s.producer.BatchArrowRecordsFromMetrics(data) + default: + return nil, fmt.Errorf("unsupported OTLP type: %T", records) + } + return batch, err +} diff --git a/exporter/otelarrowexporter/internal/arrow/stream_test.go b/exporter/otelarrowexporter/internal/arrow/stream_test.go new file mode 100644 index 000000000000..e916667c455c --- /dev/null +++ b/exporter/otelarrowexporter/internal/arrow/stream_test.go @@ -0,0 +1,349 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package arrow + +import ( + "context" + "errors" + "fmt" + "sync" + "testing" + "time" + + arrowpb "github.com/open-telemetry/otel-arrow/api/experimental/arrow/v1" + "github.com/open-telemetry/otel-arrow/collector/netstats" + arrowRecordMock "github.com/open-telemetry/otel-arrow/pkg/otel/arrow_record/mock" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.uber.org/mock/gomock" + "google.golang.org/grpc" +) + +var oneBatch = &arrowpb.BatchArrowRecords{ + BatchId: 1, +} + +type streamTestCase struct { + *commonTestCase + *commonTestStream + + producer *arrowRecordMock.MockProducerAPI + prioritizer streamPrioritizer + bgctx context.Context + doneCancel + fromTracesCall *gomock.Call + fromMetricsCall *gomock.Call + fromLogsCall *gomock.Call + stream *Stream + wait sync.WaitGroup +} + +func newStreamTestCase(t *testing.T, pname PrioritizerName) *streamTestCase { + ctrl := gomock.NewController(t) + producer := arrowRecordMock.NewMockProducerAPI(ctrl) + + bg, dc := newDoneCancel(context.Background()) + prio, state := newStreamPrioritizer(dc, pname, 1) + + ctc := newCommonTestCase(t, NotNoisy) + cts := ctc.newMockStream(bg) + + // metadata functionality is tested in exporter_test.go + ctc.requestMetadataCall.AnyTimes().Return(nil, nil) + + stream := newStream(producer, prio, ctc.telset, netstats.Noop{}, state[0]) + stream.maxStreamLifetime = 10 * time.Second + + fromTracesCall := producer.EXPECT().BatchArrowRecordsFromTraces(gomock.Any()).Times(0) + fromMetricsCall := producer.EXPECT().BatchArrowRecordsFromMetrics(gomock.Any()).Times(0) + fromLogsCall := producer.EXPECT().BatchArrowRecordsFromLogs(gomock.Any()).Times(0) + + return &streamTestCase{ + commonTestCase: ctc, + commonTestStream: cts, + producer: producer, + prioritizer: prio, + bgctx: bg, + doneCancel: dc, + stream: stream, + fromTracesCall: fromTracesCall, + fromMetricsCall: fromMetricsCall, + fromLogsCall: fromLogsCall, + } +} + +// start runs a test stream according to the behavior of testChannel. +func (tc *streamTestCase) start(channel testChannel) { + tc.traceCall.Times(1).DoAndReturn(tc.connectTestStream(channel)) + + tc.wait.Add(1) + + go func() { + defer tc.wait.Done() + tc.stream.run(tc.bgctx, tc.doneCancel, tc.traceClient, nil) + }() +} + +// cancelAndWait cancels the context and waits for the runner to return. +func (tc *streamTestCase) cancelAndWaitForShutdown() { + tc.cancel() + tc.wait.Wait() +} + +// cancel waits for the runner to exit without canceling the context. 
+func (tc *streamTestCase) waitForShutdown() { + tc.wait.Wait() +} + +// connectTestStream returns the stream under test from the common test's mock ArrowStream(). +func (tc *streamTestCase) connectTestStream(h testChannel) func(context.Context, ...grpc.CallOption) ( + arrowpb.ArrowTracesService_ArrowTracesClient, + error, +) { + return func(ctx context.Context, _ ...grpc.CallOption) ( + arrowpb.ArrowTracesService_ArrowTracesClient, + error, + ) { + if err := h.onConnect(ctx); err != nil { + return nil, err + } + tc.sendCall.AnyTimes().DoAndReturn(h.onSend(ctx)) + tc.recvCall.AnyTimes().DoAndReturn(h.onRecv(ctx)) + tc.closeSendCall.AnyTimes().DoAndReturn(h.onCloseSend()) + return tc.anyStreamClient, nil + } +} + +// get returns the stream via the prioritizer it is registered with. +func (tc *streamTestCase) mustGet() streamWriter { + stream := tc.prioritizer.nextWriter() + if stream == nil { + panic("unexpected nil stream") + } + return stream +} + +func (tc *streamTestCase) mustSendAndWait() error { + ctx := context.Background() + ch := make(chan error, 1) + wri := writeItem{ + producerCtx: context.Background(), + records: twoTraces, + errCh: ch, + } + return tc.mustGet().sendAndWait(ctx, ch, wri) +} + +// TestStreamNoMaxLifetime verifies that configuring +// max_stream_lifetime==0 works and the client never +// calls CloseSend(). +func TestStreamNoMaxLifetime(t *testing.T) { + for _, pname := range AllPrioritizers { + t.Run(string(pname), func(t *testing.T) { + + tc := newStreamTestCase(t, pname) + tc.stream.maxStreamLifetime = 0 + + tc.fromTracesCall.Times(1).Return(oneBatch, nil) + tc.closeSendCall.Times(0) + + channel := newHealthyTestChannel() + tc.start(channel) + defer tc.cancelAndWaitForShutdown() + var wg sync.WaitGroup + wg.Add(1) + defer wg.Wait() + go func() { + defer wg.Done() + batch := <-channel.sent + channel.recv <- statusOKFor(batch.BatchId) + }() + + err := tc.mustSendAndWait() + require.NoError(t, err) + }) + } +} + +// TestStreamEncodeError verifies that an encoder error in the sender +// yields a permanent error. +func TestStreamEncodeError(t *testing.T) { + for _, pname := range AllPrioritizers { + t.Run(string(pname), func(t *testing.T) { + tc := newStreamTestCase(t, pname) + + testErr := fmt.Errorf("test encode error") + tc.fromTracesCall.Times(1).Return(nil, testErr) + + tc.start(newHealthyTestChannel()) + defer tc.cancelAndWaitForShutdown() + + // sender should get a permanent testErr + err := tc.mustSendAndWait() + require.Error(t, err) + require.True(t, errors.Is(err, testErr)) + require.True(t, consumererror.IsPermanent(err)) + }) + } +} + +// TestStreamUnknownBatchError verifies that the stream reader handles +// a unknown BatchID. +func TestStreamUnknownBatchError(t *testing.T) { + for _, pname := range AllPrioritizers { + t.Run(string(pname), func(t *testing.T) { + tc := newStreamTestCase(t, pname) + + tc.fromTracesCall.Times(1).Return(oneBatch, nil) + + channel := newHealthyTestChannel() + tc.start(channel) + defer tc.cancelAndWaitForShutdown() + + var wg sync.WaitGroup + wg.Add(1) + defer wg.Wait() + go func() { + defer wg.Done() + <-channel.sent + channel.recv <- statusOKFor(-1 /*unknown*/) + }() + // sender should get ErrStreamRestarting + err := tc.mustSendAndWait() + require.Error(t, err) + require.True(t, errors.Is(err, ErrStreamRestarting)) + }) + } +} + +// TestStreamStatusUnavailableInvalid verifies that the stream reader handles +// an unavailable or invalid status w/o breaking the stream. 
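+// Both statuses are delivered to the waiting sender as errors via the
+// processBatchStatus mapping (Unavailable and InvalidArgument gRPC codes),
+// while the stream itself keeps running, so the third send succeeds.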
+func TestStreamStatusUnavailableInvalid(t *testing.T) { + for _, pname := range AllPrioritizers { + t.Run(string(pname), func(t *testing.T) { + tc := newStreamTestCase(t, pname) + + tc.fromTracesCall.Times(3).Return(oneBatch, nil) + + channel := newHealthyTestChannel() + tc.start(channel) + defer tc.cancelAndWaitForShutdown() + + var wg sync.WaitGroup + wg.Add(1) + defer wg.Wait() + go func() { + defer wg.Done() + batch := <-channel.sent + channel.recv <- statusUnavailableFor(batch.BatchId) + batch = <-channel.sent + channel.recv <- statusInvalidFor(batch.BatchId) + batch = <-channel.sent + channel.recv <- statusOKFor(batch.BatchId) + }() + // sender should get "test unavailable" once, success second time. + err := tc.mustSendAndWait() + require.Error(t, err) + require.Contains(t, err.Error(), "test unavailable") + + err = tc.mustSendAndWait() + require.Error(t, err) + require.Contains(t, err.Error(), "test invalid") + + err = tc.mustSendAndWait() + require.NoError(t, err) + }) + } +} + +// TestStreamStatusUnrecognized verifies that the stream reader handles +// an unrecognized status by breaking the stream. +func TestStreamStatusUnrecognized(t *testing.T) { + for _, pname := range AllPrioritizers { + t.Run(string(pname), func(t *testing.T) { + tc := newStreamTestCase(t, pname) + + tc.fromTracesCall.Times(1).Return(oneBatch, nil) + + channel := newHealthyTestChannel() + tc.start(channel) + defer tc.cancelAndWaitForShutdown() + + var wg sync.WaitGroup + wg.Add(1) + defer wg.Wait() + go func() { + defer wg.Done() + batch := <-channel.sent + channel.recv <- statusUnrecognizedFor(batch.BatchId) + }() + err := tc.mustSendAndWait() + require.Error(t, err) + require.Contains(t, err.Error(), "test unrecognized") + + // Note: do not cancel the context, the stream should be + // shutting down due to the error. + tc.waitForShutdown() + }) + } +} + +// TestStreamUnsupported verifies that the stream signals downgrade +// when an Unsupported code is received, which is how the gRPC client +// responds when the server does not support arrow. +func TestStreamUnsupported(t *testing.T) { + for _, pname := range AllPrioritizers { + t.Run(string(pname), func(t *testing.T) { + tc := newStreamTestCase(t, pname) + + // If the write succeeds before the read, then the FromTraces + // call will occur. Otherwise, it will not. + + tc.fromTracesCall.MinTimes(0).MaxTimes(1).Return(oneBatch, nil) + + channel := newArrowUnsupportedTestChannel() + tc.start(channel) + defer func() { + // When the stream returns, the downgrade is needed to + // cause the request to respond or else it waits for a new + // stream. + tc.waitForShutdown() + tc.cancel() + }() + + err := tc.mustSendAndWait() + require.Equal(t, ErrStreamRestarting, err) + + tc.waitForShutdown() + + require.Less(t, 0, len(tc.observedLogs.All()), "should have at least one log: %v", tc.observedLogs.All()) + require.Equal(t, tc.observedLogs.All()[0].Message, "arrow is not supported") + }) + } +} + +// TestStreamSendError verifies that the stream reader handles a +// Send() error. 
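+// The mock channel blocks Send() until unblock() is called roughly 200ms in
+// and then fails; the pending batch is completed with ErrStreamRestarting
+// during stream cleanup.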
+func TestStreamSendError(t *testing.T) { + for _, pname := range AllPrioritizers { + t.Run(string(pname), func(t *testing.T) { + tc := newStreamTestCase(t, pname) + + tc.fromTracesCall.Times(1).Return(oneBatch, nil) + + channel := newSendErrorTestChannel() + tc.start(channel) + defer tc.cancelAndWaitForShutdown() + + go func() { + time.Sleep(200 * time.Millisecond) + channel.unblock() + }() + // sender should get ErrStreamRestarting + err := tc.mustSendAndWait() + require.Error(t, err) + require.True(t, errors.Is(err, ErrStreamRestarting)) + }) + } +} diff --git a/exporter/otelarrowexporter/metadata.yaml b/exporter/otelarrowexporter/metadata.yaml index c2e8b6f8e2dd..3a830d26c436 100644 --- a/exporter/otelarrowexporter/metadata.yaml +++ b/exporter/otelarrowexporter/metadata.yaml @@ -9,9 +9,6 @@ status: codeowners: active: [jmacd, moh-osman3] -# TODO: Update the exporter to pass the tests tests: - skip_lifecycle: true - skip_shutdown: true - goleak: - skip: true + config: + endpoint: http://127.0.0.1:4317 diff --git a/exporter/otelarrowexporter/otelarrow.go b/exporter/otelarrowexporter/otelarrow.go index c1e689e3c01a..01b21e392b00 100644 --- a/exporter/otelarrowexporter/otelarrow.go +++ b/exporter/otelarrowexporter/otelarrow.go @@ -7,72 +7,329 @@ import ( "context" "errors" "fmt" + "runtime" + "time" + arrowPkg "github.com/apache/arrow/go/v14/arrow" + "github.com/open-telemetry/otel-arrow/collector/compression/zstd" + "github.com/open-telemetry/otel-arrow/collector/netstats" + arrowRecord "github.com/open-telemetry/otel-arrow/pkg/otel/arrow_record" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configcompression" + "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterhelper" "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/plog/plogotlp" "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" "go.opentelemetry.io/collector/pdata/ptrace" + "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp" + "go.uber.org/multierr" + "go.uber.org/zap" + "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/otelarrowexporter/internal/arrow" ) -// baseExporter is used as the basis for all OpenTelemetry signal types. type baseExporter struct { - // config is the active component.Config. + // Input configuration. config *Config - // settings are the active collector-wide settings. - settings exporter.CreateSettings + // gRPC clients and connection. + traceExporter ptraceotlp.GRPCClient + metricExporter pmetricotlp.GRPCClient + logExporter plogotlp.GRPCClient + clientConn *grpc.ClientConn + metadata metadata.MD + callOptions []grpc.CallOption + settings exporter.CreateSettings + netReporter *netstats.NetworkReporter - // TODO: implementation + // Default user-agent header. + userAgent string + + // OTel-Arrow optional state + arrow *arrow.Exporter + // streamClientFunc is the stream constructor + streamClientFactory streamClientFactory } type streamClientFactory func(conn *grpc.ClientConn) arrow.StreamClientFunc -// newExporter configures a new exporter using the associated stream factory for Arrow. 
-func newExporter(cfg component.Config, set exporter.CreateSettings, _ streamClientFactory) (*baseExporter, error) { - // TODO: Implementation. - oCfg, ok := cfg.(*Config) - if !ok { - return nil, fmt.Errorf("unrecognized configuration type: %T", cfg) - } +// Crete new exporter and start it. The exporter will begin connecting but +// this function may return before the connection is established. +func newExporter(cfg component.Config, set exporter.CreateSettings, streamClientFactory streamClientFactory) (*baseExporter, error) { + oCfg := cfg.(*Config) + if oCfg.Endpoint == "" { - return nil, errors.New("OTel-Arrow exporter config requires an Endpoint") + return nil, errors.New("OTLP exporter config requires an Endpoint") + } + + netReporter, err := netstats.NewExporterNetworkReporter(set) + if err != nil { + return nil, err } + userAgent := fmt.Sprintf("%s/%s (%s/%s)", + set.BuildInfo.Description, set.BuildInfo.Version, runtime.GOOS, runtime.GOARCH) + + if !oCfg.Arrow.Disabled { + // Ignoring an error because Validate() was called. + _ = zstd.SetEncoderConfig(oCfg.Arrow.Zstd) + + userAgent += fmt.Sprintf(" ApacheArrow/%s (NumStreams/%d)", arrowPkg.PkgVersion, oCfg.Arrow.NumStreams) + } + return &baseExporter{ - config: oCfg, - settings: set, + config: oCfg, + settings: set, + userAgent: userAgent, + netReporter: netReporter, + streamClientFactory: streamClientFactory, }, nil } -// start configures and starts the gRPC client connection. +// start actually creates the gRPC connection. The client construction is deferred till this point as this +// is the only place we get hold of Extensions which are required to construct auth round tripper. func (e *baseExporter) start(ctx context.Context, host component.Host) (err error) { - // TODO: Implementation: the following is a placeholder used - // to satisfy gRPC configuration-related configuration errors. - if _, err = e.config.ClientConfig.ToClientConn(ctx, host, e.settings.TelemetrySettings); err != nil { + dialOpts := []grpc.DialOption{ + grpc.WithUserAgent(e.userAgent), + } + if e.netReporter != nil { + dialOpts = append(dialOpts, grpc.WithStatsHandler(e.netReporter.Handler())) + } + dialOpts = append(dialOpts, e.config.UserDialOptions...) + if e.clientConn, err = e.config.ClientConfig.ToClientConn(ctx, host, e.settings.TelemetrySettings, dialOpts...); err != nil { return err } + e.traceExporter = ptraceotlp.NewGRPCClient(e.clientConn) + e.metricExporter = pmetricotlp.NewGRPCClient(e.clientConn) + e.logExporter = plogotlp.NewGRPCClient(e.clientConn) + headers := map[string]string{} + for k, v := range e.config.ClientConfig.Headers { + headers[k] = string(v) + } + e.metadata = metadata.New(headers) + e.callOptions = []grpc.CallOption{ + grpc.WaitForReady(e.config.ClientConfig.WaitForReady), + } + + if !e.config.Arrow.Disabled { + // Note this sets static outgoing context for all future stream requests. + ctx := e.enhanceContext(context.Background()) + + var perRPCCreds credentials.PerRPCCredentials + if e.config.ClientConfig.Auth != nil { + // Get the auth extension, we'll use it to enrich the request context. 
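+			// The resulting PerRPCCredentials are handed to the Arrow
+			// exporter below, which attaches the caller's metadata to its
+			// long-lived streams rather than to individual unary export calls.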
+ authClient, err := e.config.ClientConfig.Auth.GetClientAuthenticator(host.GetExtensions()) + if err != nil { + return err + } + + perRPCCreds, err = authClient.PerRPCCredentials() + if err != nil { + return err + } + } + + arrowOpts := e.config.Arrow.toArrowProducerOptions() + + arrowCallOpts := e.callOptions + + if e.config.ClientConfig.Compression == configcompression.TypeZstd { + // ignore the error below b/c Validate() was called + _ = zstd.SetEncoderConfig(e.config.Arrow.Zstd) + // use the configured compressor. + arrowCallOpts = append(arrowCallOpts, e.config.Arrow.Zstd.CallOption()) + } + + e.arrow = arrow.NewExporter(e.config.Arrow.MaxStreamLifetime, e.config.Arrow.NumStreams, e.config.Arrow.Prioritizer, e.config.Arrow.DisableDowngrade, e.settings.TelemetrySettings, arrowCallOpts, func() arrowRecord.ProducerAPI { + return arrowRecord.NewProducerWithOptions(arrowOpts...) + }, e.streamClientFactory(e.clientConn), perRPCCreds, e.netReporter) + + if err := e.arrow.Start(ctx); err != nil { + return err + } + } + return nil } -func (e *baseExporter) shutdown(_ context.Context) error { - // TODO: Implementation. +func (e *baseExporter) shutdown(ctx context.Context) error { + var err error + if e.arrow != nil { + err = multierr.Append(err, e.arrow.Shutdown(ctx)) + } + if e.clientConn != nil { + err = multierr.Append(err, e.clientConn.Close()) + } + return err +} + +// arrowSendAndWait gets an available stream and tries to send using +// Arrow if it is configured. A (false, nil) result indicates for the +// caller to fall back to ordinary OTLP. +// +// Note that ctx is has not had enhanceContext() called, meaning it +// will have outgoing gRPC metadata only when an upstream processor or +// receiver placed it there. +func (e *baseExporter) arrowSendAndWait(ctx context.Context, data any) (sent bool, _ error) { + if e.arrow == nil { + return false, nil + } + sent, err := e.arrow.SendAndWait(ctx, data) + if err != nil { + return sent, processError(err) + } + return sent, nil +} + +func (e *baseExporter) pushTraces(ctx context.Context, td ptrace.Traces) error { + if sent, err := e.arrowSendAndWait(ctx, td); err != nil { + return err + } else if sent { + return nil + } + req := ptraceotlp.NewExportRequestFromTraces(td) + resp, respErr := e.traceExporter.Export(e.enhanceContext(ctx), req, e.callOptions...) + if err := processError(respErr); err != nil { + return err + } + partialSuccess := resp.PartialSuccess() + if !(partialSuccess.ErrorMessage() == "" && partialSuccess.RejectedSpans() == 0) { + // TODO: These should be counted, similar to dropped items. + e.settings.Logger.Warn("partial success", + zap.String("message", resp.PartialSuccess().ErrorMessage()), + zap.Int64("num_rejected", resp.PartialSuccess().RejectedSpans()), + ) + } return nil } -func (e *baseExporter) pushTraces(_ context.Context, _ ptrace.Traces) error { - // TODO: Implementation. +func (e *baseExporter) pushMetrics(ctx context.Context, md pmetric.Metrics) error { + if sent, err := e.arrowSendAndWait(ctx, md); err != nil { + return err + } else if sent { + return nil + } + req := pmetricotlp.NewExportRequestFromMetrics(md) + resp, respErr := e.metricExporter.Export(e.enhanceContext(ctx), req, e.callOptions...) + if err := processError(respErr); err != nil { + return err + } + partialSuccess := resp.PartialSuccess() + if !(partialSuccess.ErrorMessage() == "" && partialSuccess.RejectedDataPoints() == 0) { + // TODO: These should be counted, similar to dropped items. 
+		e.settings.Logger.Warn("partial success",
+			zap.String("message", resp.PartialSuccess().ErrorMessage()),
+			zap.Int64("num_rejected", resp.PartialSuccess().RejectedDataPoints()),
+		)
+	}
+	return nil
+}
+
+func (e *baseExporter) pushLogs(ctx context.Context, ld plog.Logs) error {
+	if sent, err := e.arrowSendAndWait(ctx, ld); err != nil {
+		return err
+	} else if sent {
+		return nil
+	}
+	req := plogotlp.NewExportRequestFromLogs(ld)
+	resp, respErr := e.logExporter.Export(e.enhanceContext(ctx), req, e.callOptions...)
+	if err := processError(respErr); err != nil {
+		return err
+	}
+	partialSuccess := resp.PartialSuccess()
+	if !(partialSuccess.ErrorMessage() == "" && partialSuccess.RejectedLogRecords() == 0) {
+		// TODO: These should be counted, similar to dropped items.
+		e.settings.Logger.Warn("partial success",
+			zap.String("message", resp.PartialSuccess().ErrorMessage()),
+			zap.Int64("num_rejected", resp.PartialSuccess().RejectedLogRecords()),
+		)
+	}
+	return nil
+}
+
+func (e *baseExporter) enhanceContext(ctx context.Context) context.Context {
+	if e.metadata.Len() > 0 {
+		ctx = metadata.NewOutgoingContext(ctx, e.metadata)
+	}
+	return ctx
+}
+
+func processError(err error) error {
+	if err == nil {
+		// Request is successful, we are done.
+		return nil
+	}
+
+	// We have an error, check gRPC status code.
+	st := status.Convert(err)
+	if st.Code() == codes.OK {
+		// Not really an error, still success.
+		return nil
+	}
+
+	// Now, this is a real error.
+
+	retryInfo := getRetryInfo(st)
+
+	if !shouldRetry(st.Code(), retryInfo) {
+		// It is not a retryable error, we should not retry.
+		return consumererror.NewPermanent(err)
+	}
+
+	// Check if server returned throttling information.
+	throttleDuration := getThrottleDuration(retryInfo)
+	if throttleDuration != 0 {
+		// We are throttled. Wait before retrying as requested by the server.
+		return exporterhelper.NewThrottleRetry(err, throttleDuration)
+	}
+
+	// Need to retry.
+
+	return err
+}
+
+func shouldRetry(code codes.Code, retryInfo *errdetails.RetryInfo) bool {
+	switch code {
+	case codes.Canceled,
+		codes.DeadlineExceeded,
+		codes.Aborted,
+		codes.OutOfRange,
+		codes.Unavailable,
+		codes.DataLoss:
+		// These are retryable errors.
+		return true
+	case codes.ResourceExhausted:
+		// Retry only if RetryInfo was supplied by the server.
+		// This indicates that the server can still recover from resource exhaustion.
+		return retryInfo != nil
+	}
+	// Don't retry on any other code.
+ return false +} + +func getRetryInfo(status *status.Status) *errdetails.RetryInfo { + for _, detail := range status.Details() { + if t, ok := detail.(*errdetails.RetryInfo); ok { + return t + } + } return nil } + +func getThrottleDuration(t *errdetails.RetryInfo) time.Duration { + if t == nil || t.RetryDelay == nil { + return 0 + } + if t.RetryDelay.Seconds > 0 || t.RetryDelay.Nanos > 0 { + return time.Duration(t.RetryDelay.Seconds)*time.Second + time.Duration(t.RetryDelay.Nanos)*time.Nanosecond + } + return 0 +} diff --git a/exporter/otelarrowexporter/otelarrow_test.go b/exporter/otelarrowexporter/otelarrow_test.go new file mode 100644 index 000000000000..3d84fd6ca618 --- /dev/null +++ b/exporter/otelarrowexporter/otelarrow_test.go @@ -0,0 +1,1189 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelarrowexporter + +import ( + "context" + "fmt" + "io" + "net" + "net/http" + "path/filepath" + "runtime" + "sync" + "sync/atomic" + "testing" + "time" + + arrowpb "github.com/open-telemetry/otel-arrow/api/experimental/arrow/v1" + arrowpbMock "github.com/open-telemetry/otel-arrow/api/experimental/arrow/v1/mock" + "github.com/open-telemetry/otel-arrow/collector/testdata" + arrowRecord "github.com/open-telemetry/otel-arrow/pkg/otel/arrow_record" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/client" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configauth" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/configopaque" + "go.opentelemetry.io/collector/config/configtls" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exportertest" + "go.opentelemetry.io/collector/extension" + "go.opentelemetry.io/collector/extension/auth" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/plog/plogotlp" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" + "go.opentelemetry.io/collector/pdata/ptrace" + "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp" + "go.uber.org/mock/gomock" + "go.uber.org/zap/zaptest" + "golang.org/x/net/http2/hpack" + "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/durationpb" + + "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/otelarrowexporter/internal/arrow/grpcmock" +) + +type mockReceiver struct { + srv *grpc.Server + ln net.Listener + requestCount *atomic.Int32 + totalItems *atomic.Int32 + mux sync.Mutex + metadata metadata.MD + exportError error +} + +func (r *mockReceiver) getMetadata() metadata.MD { + r.mux.Lock() + defer r.mux.Unlock() + return r.metadata +} + +func (r *mockReceiver) setExportError(err error) { + r.mux.Lock() + defer r.mux.Unlock() + r.exportError = err +} + +type mockTracesReceiver struct { + ptraceotlp.UnimplementedGRPCServer + mockReceiver + exportResponse func() ptraceotlp.ExportResponse + lastRequest ptrace.Traces +} + +func (r *mockTracesReceiver) Export(ctx context.Context, req ptraceotlp.ExportRequest) (ptraceotlp.ExportResponse, error) { + r.requestCount.Add(int32(1)) + td := req.Traces() + r.totalItems.Add(int32(td.SpanCount())) + r.mux.Lock() + 
defer r.mux.Unlock() + r.lastRequest = td + r.metadata, _ = metadata.FromIncomingContext(ctx) + return r.exportResponse(), r.exportError +} + +func (r *mockTracesReceiver) getLastRequest() ptrace.Traces { + r.mux.Lock() + defer r.mux.Unlock() + return r.lastRequest +} + +func (r *mockTracesReceiver) setExportResponse(fn func() ptraceotlp.ExportResponse) { + r.mux.Lock() + defer r.mux.Unlock() + r.exportResponse = fn +} + +func otelArrowTracesReceiverOnGRPCServer(ln net.Listener, useTLS bool) (*mockTracesReceiver, error) { + sopts := []grpc.ServerOption{} + + if useTLS { + _, currentFile, _, _ := runtime.Caller(0) + basepath := filepath.Dir(currentFile) + certpath := filepath.Join(basepath, filepath.Join("testdata", "test_cert.pem")) + keypath := filepath.Join(basepath, filepath.Join("testdata", "test_key.pem")) + + creds, err := credentials.NewServerTLSFromFile(certpath, keypath) + if err != nil { + return nil, err + } + sopts = append(sopts, grpc.Creds(creds)) + } + + rcv := &mockTracesReceiver{ + mockReceiver: mockReceiver{ + srv: grpc.NewServer(sopts...), + ln: ln, + requestCount: &atomic.Int32{}, + totalItems: &atomic.Int32{}, + }, + exportResponse: ptraceotlp.NewExportResponse, + } + + ptraceotlp.RegisterGRPCServer(rcv.srv, rcv) + + return rcv, nil +} + +func (r *mockTracesReceiver) start() { + go func() { + _ = r.srv.Serve(r.ln) + }() +} + +type mockLogsReceiver struct { + plogotlp.UnimplementedGRPCServer + mockReceiver + exportResponse func() plogotlp.ExportResponse + lastRequest plog.Logs +} + +func (r *mockLogsReceiver) Export(ctx context.Context, req plogotlp.ExportRequest) (plogotlp.ExportResponse, error) { + r.requestCount.Add(int32(1)) + ld := req.Logs() + r.totalItems.Add(int32(ld.LogRecordCount())) + r.mux.Lock() + defer r.mux.Unlock() + r.lastRequest = ld + r.metadata, _ = metadata.FromIncomingContext(ctx) + return r.exportResponse(), r.exportError +} + +func (r *mockLogsReceiver) getLastRequest() plog.Logs { + r.mux.Lock() + defer r.mux.Unlock() + return r.lastRequest +} + +func (r *mockLogsReceiver) setExportResponse(fn func() plogotlp.ExportResponse) { + r.mux.Lock() + defer r.mux.Unlock() + r.exportResponse = fn +} + +func otelArrowLogsReceiverOnGRPCServer(ln net.Listener) *mockLogsReceiver { + rcv := &mockLogsReceiver{ + mockReceiver: mockReceiver{ + srv: grpc.NewServer(), + requestCount: &atomic.Int32{}, + totalItems: &atomic.Int32{}, + }, + exportResponse: plogotlp.NewExportResponse, + } + + // Now run it as a gRPC server + plogotlp.RegisterGRPCServer(rcv.srv, rcv) + go func() { + _ = rcv.srv.Serve(ln) + }() + + return rcv +} + +type mockMetricsReceiver struct { + pmetricotlp.UnimplementedGRPCServer + mockReceiver + exportResponse func() pmetricotlp.ExportResponse + lastRequest pmetric.Metrics +} + +func (r *mockMetricsReceiver) Export(ctx context.Context, req pmetricotlp.ExportRequest) (pmetricotlp.ExportResponse, error) { + md := req.Metrics() + r.requestCount.Add(int32(1)) + r.totalItems.Add(int32(md.DataPointCount())) + r.mux.Lock() + defer r.mux.Unlock() + r.lastRequest = md + r.metadata, _ = metadata.FromIncomingContext(ctx) + return r.exportResponse(), r.exportError +} + +func (r *mockMetricsReceiver) getLastRequest() pmetric.Metrics { + r.mux.Lock() + defer r.mux.Unlock() + return r.lastRequest +} + +func (r *mockMetricsReceiver) setExportResponse(fn func() pmetricotlp.ExportResponse) { + r.mux.Lock() + defer r.mux.Unlock() + r.exportResponse = fn +} + +func otelArrowMetricsReceiverOnGRPCServer(ln net.Listener) *mockMetricsReceiver { + rcv := 
&mockMetricsReceiver{ + mockReceiver: mockReceiver{ + srv: grpc.NewServer(), + requestCount: &atomic.Int32{}, + totalItems: &atomic.Int32{}, + }, + exportResponse: pmetricotlp.NewExportResponse, + } + + // Now run it as a gRPC server + pmetricotlp.RegisterGRPCServer(rcv.srv, rcv) + go func() { + _ = rcv.srv.Serve(ln) + }() + + return rcv +} + +type hostWithExtensions struct { + component.Host + exts map[component.ID]component.Component +} + +func newHostWithExtensions(exts map[component.ID]component.Component) component.Host { + return &hostWithExtensions{ + Host: componenttest.NewNopHost(), + exts: exts, + } +} + +func (h *hostWithExtensions) GetExtensions() map[component.ID]component.Component { + return h.exts +} + +type testAuthExtension struct { + extension.Extension + + prc credentials.PerRPCCredentials +} + +func newTestAuthExtension(t *testing.T, mdf func(ctx context.Context) map[string]string) auth.Client { + ctrl := gomock.NewController(t) + prc := grpcmock.NewMockPerRPCCredentials(ctrl) + prc.EXPECT().RequireTransportSecurity().AnyTimes().Return(false) + prc.EXPECT().GetRequestMetadata(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn( + func(ctx context.Context, _ ...string) (map[string]string, error) { + return mdf(ctx), nil + }, + ) + return &testAuthExtension{ + prc: prc, + } +} + +func (a *testAuthExtension) RoundTripper(_ http.RoundTripper) (http.RoundTripper, error) { + return nil, fmt.Errorf("unused") +} + +func (a *testAuthExtension) PerRPCCredentials() (credentials.PerRPCCredentials, error) { + return a.prc, nil +} + +func TestSendTraces(t *testing.T) { + // Start an OTel-Arrow receiver. + ln, err := net.Listen("tcp", "localhost:") + require.NoError(t, err, "Failed to find an available address to run the gRPC server: %v", err) + rcv, _ := otelArrowTracesReceiverOnGRPCServer(ln, false) + rcv.start() + // Also closes the connection. + defer rcv.srv.GracefulStop() + + // Start an OTLP exporter and point to the receiver. + factory := NewFactory() + authID := component.NewID(component.MustNewType("testauth")) + expectedHeader := []string{"header-value"} + + cfg := factory.CreateDefaultConfig().(*Config) + // Disable queuing to ensure that we execute the request when calling ConsumeTraces + // otherwise we will not see any errors. + cfg.QueueSettings.Enabled = false + cfg.ClientConfig = configgrpc.ClientConfig{ + Endpoint: ln.Addr().String(), + TLSSetting: configtls.ClientConfig{ + Insecure: true, + }, + Headers: map[string]configopaque.String{ + "header": configopaque.String(expectedHeader[0]), + }, + Auth: &configauth.Authentication{ + AuthenticatorID: authID, + }, + } + // This test fails w/ Arrow enabled because the function + // passed to newTestAuthExtension() below requires it the + // caller's context, and the newStream doesn't have it. 
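+	// Disabling Arrow keeps this test on the plain OTLP path, where the
+	// caller's context (and its client metadata) still reaches the auth extension.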
+ cfg.Arrow.Disabled = true + + set := exportertest.NewNopCreateSettings() + set.BuildInfo.Description = "Collector" + set.BuildInfo.Version = "1.2.3test" + exp, err := factory.CreateTracesExporter(context.Background(), set, cfg) + require.NoError(t, err) + require.NotNil(t, exp) + + defer func() { + assert.NoError(t, exp.Shutdown(context.Background())) + }() + + host := newHostWithExtensions( + map[component.ID]component.Component{ + authID: newTestAuthExtension(t, func(ctx context.Context) map[string]string { + return map[string]string{ + "callerid": client.FromContext(ctx).Metadata.Get("in_callerid")[0], + } + }), + }, + ) + assert.NoError(t, exp.Start(context.Background(), host)) + + // Ensure that initially there is no data in the receiver. + assert.EqualValues(t, 0, rcv.requestCount.Load()) + + newCallerContext := func(value string) context.Context { + return client.NewContext(context.Background(), + client.Info{ + Metadata: client.NewMetadata(map[string][]string{ + "in_callerid": {value}, + }), + }, + ) + } + const caller1 = "caller1" + const caller2 = "caller2" + callCtx1 := newCallerContext(caller1) + callCtx2 := newCallerContext(caller2) + + // Send empty trace. + td := ptrace.NewTraces() + assert.NoError(t, exp.ConsumeTraces(callCtx1, td)) + + // Wait until it is received. + assert.Eventually(t, func() bool { + return rcv.requestCount.Load() > 0 + }, 10*time.Second, 5*time.Millisecond) + + // Ensure it was received empty. + assert.EqualValues(t, 0, rcv.totalItems.Load()) + md := rcv.getMetadata() + + // Expect caller1 and the static header + require.EqualValues(t, expectedHeader, md.Get("header")) + require.EqualValues(t, []string{caller1}, md.Get("callerid")) + + // A trace with 2 spans. + td = testdata.GenerateTraces(2) + + err = exp.ConsumeTraces(callCtx2, td) + assert.NoError(t, err) + + // Wait until it is received. + assert.Eventually(t, func() bool { + return rcv.requestCount.Load() > 1 + }, 10*time.Second, 5*time.Millisecond) + + // Verify received span. + assert.EqualValues(t, 2, rcv.totalItems.Load()) + assert.EqualValues(t, 2, rcv.requestCount.Load()) + assert.EqualValues(t, td, rcv.getLastRequest()) + + // Test the static metadata + md = rcv.getMetadata() + require.EqualValues(t, expectedHeader, md.Get("header")) + require.Equal(t, len(md.Get("User-Agent")), 1) + require.Contains(t, md.Get("User-Agent")[0], "Collector/1.2.3test") + + // Test the caller's dynamic metadata + require.EqualValues(t, []string{caller2}, md.Get("callerid")) + + // Return partial success + rcv.setExportResponse(func() ptraceotlp.ExportResponse { + response := ptraceotlp.NewExportResponse() + partialSuccess := response.PartialSuccess() + partialSuccess.SetErrorMessage("Some spans were not ingested") + partialSuccess.SetRejectedSpans(1) + + return response + }) + + // A request with 2 Trace entries. + td = testdata.GenerateTraces(2) + + // PartialSuccess is not an error. 
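+	// The export below is still expected to succeed; the rejected span count
+	// is only reported in the response.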
+ err = exp.ConsumeTraces(callCtx1, td) + assert.NoError(t, err) +} + +func TestSendTracesWhenEndpointHasHttpScheme(t *testing.T) { + tests := []struct { + name string + useTLS bool + scheme string + gRPCClientSettings configgrpc.ClientConfig + }{ + { + name: "Use https scheme", + useTLS: true, + scheme: "https://", + gRPCClientSettings: configgrpc.ClientConfig{}, + }, + { + name: "Use http scheme", + useTLS: false, + scheme: "http://", + gRPCClientSettings: configgrpc.ClientConfig{ + TLSSetting: configtls.ClientConfig{ + Insecure: true, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // Start an OTel-Arrow receiver. + ln, err := net.Listen("tcp", "localhost:") + require.NoError(t, err, "Failed to find an available address to run the gRPC server: %v", err) + rcv, err := otelArrowTracesReceiverOnGRPCServer(ln, test.useTLS) + rcv.start() + require.NoError(t, err, "Failed to start mock OTLP receiver") + // Also closes the connection. + defer rcv.srv.GracefulStop() + + // Start an OTLP exporter and point to the receiver. + factory := NewFactory() + cfg := factory.CreateDefaultConfig().(*Config) + cfg.ClientConfig = test.gRPCClientSettings + cfg.ClientConfig.Endpoint = test.scheme + ln.Addr().String() + cfg.Arrow.MaxStreamLifetime = 100 * time.Second + if test.useTLS { + cfg.ClientConfig.TLSSetting.InsecureSkipVerify = true + } + set := exportertest.NewNopCreateSettings() + exp, err := factory.CreateTracesExporter(context.Background(), set, cfg) + require.NoError(t, err) + require.NotNil(t, exp) + + defer func() { + assert.NoError(t, exp.Shutdown(context.Background())) + }() + + host := componenttest.NewNopHost() + assert.NoError(t, exp.Start(context.Background(), host)) + + // Ensure that initially there is no data in the receiver. + assert.EqualValues(t, 0, rcv.requestCount.Load()) + + // Send empty trace. + td := ptrace.NewTraces() + assert.NoError(t, exp.ConsumeTraces(context.Background(), td)) + + // Wait until it is received. + assert.Eventually(t, func() bool { + return rcv.requestCount.Load() > 0 + }, 10*time.Second, 5*time.Millisecond) + + // Ensure it was received empty. + assert.EqualValues(t, 0, rcv.totalItems.Load()) + }) + } +} + +func TestSendMetrics(t *testing.T) { + // Start an OTel-Arrow receiver. + ln, err := net.Listen("tcp", "localhost:") + require.NoError(t, err, "Failed to find an available address to run the gRPC server: %v", err) + rcv := otelArrowMetricsReceiverOnGRPCServer(ln) + // Also closes the connection. + defer rcv.srv.GracefulStop() + + // Start an OTLP exporter and point to the receiver. + factory := NewFactory() + cfg := factory.CreateDefaultConfig().(*Config) + // Disable queuing to ensure that we execute the request when calling ConsumeMetrics + // otherwise we will not see any errors. 
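+	// Retries are also disabled so the InvalidArgument status returned later
+	// in this test surfaces immediately as an error.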
+ cfg.QueueSettings.Enabled = false + cfg.RetryConfig.Enabled = false + cfg.ClientConfig = configgrpc.ClientConfig{ + Endpoint: ln.Addr().String(), + TLSSetting: configtls.ClientConfig{ + Insecure: true, + }, + Headers: map[string]configopaque.String{ + "header": "header-value", + }, + } + cfg.Arrow.MaxStreamLifetime = 100 * time.Second + set := exportertest.NewNopCreateSettings() + set.BuildInfo.Description = "Collector" + set.BuildInfo.Version = "1.2.3test" + exp, err := factory.CreateMetricsExporter(context.Background(), set, cfg) + require.NoError(t, err) + require.NotNil(t, exp) + defer func() { + assert.NoError(t, exp.Shutdown(context.Background())) + }() + + host := componenttest.NewNopHost() + + assert.NoError(t, exp.Start(context.Background(), host)) + + // Ensure that initially there is no data in the receiver. + assert.EqualValues(t, 0, rcv.requestCount.Load()) + + // Send empty metric. + md := pmetric.NewMetrics() + assert.NoError(t, exp.ConsumeMetrics(context.Background(), md)) + + // Wait until it is received. + assert.Eventually(t, func() bool { + return rcv.requestCount.Load() > 0 + }, 10*time.Second, 5*time.Millisecond) + + // Ensure it was received empty. + assert.EqualValues(t, 0, rcv.totalItems.Load()) + + // Send two metrics. + md = testdata.GenerateMetrics(2) + + err = exp.ConsumeMetrics(context.Background(), md) + assert.NoError(t, err) + + // Wait until it is received. + assert.Eventually(t, func() bool { + return rcv.requestCount.Load() > 1 + }, 10*time.Second, 5*time.Millisecond) + + expectedHeader := []string{"header-value"} + + // Verify received metrics. + assert.EqualValues(t, uint32(2), rcv.requestCount.Load()) + assert.EqualValues(t, uint32(4), rcv.totalItems.Load()) + assert.EqualValues(t, md, rcv.getLastRequest()) + + mdata := rcv.getMetadata() + require.EqualValues(t, mdata.Get("header"), expectedHeader) + require.Equal(t, len(mdata.Get("User-Agent")), 1) + require.Contains(t, mdata.Get("User-Agent")[0], "Collector/1.2.3test") + + st := status.New(codes.InvalidArgument, "Invalid argument") + rcv.setExportError(st.Err()) + + // Send two metrics.. + md = testdata.GenerateMetrics(2) + + err = exp.ConsumeMetrics(context.Background(), md) + assert.Error(t, err) + + rcv.setExportError(nil) + + // Return partial success + rcv.setExportResponse(func() pmetricotlp.ExportResponse { + response := pmetricotlp.NewExportResponse() + partialSuccess := response.PartialSuccess() + partialSuccess.SetErrorMessage("Some data points were not ingested") + partialSuccess.SetRejectedDataPoints(1) + + return response + }) + + // Send two metrics. + md = testdata.GenerateMetrics(2) + assert.NoError(t, exp.ConsumeMetrics(context.Background(), md)) +} + +func TestSendTraceDataServerDownAndUp(t *testing.T) { + // Find the addr, but don't start the server. + ln, err := net.Listen("tcp", "localhost:") + require.NoError(t, err, "Failed to find an available address to run the gRPC server: %v", err) + + // Start an OTel-Arrow exporter and point to the receiver. + factory := NewFactory() + cfg := factory.CreateDefaultConfig().(*Config) + // Disable queuing to ensure that we execute the request when calling ConsumeTraces + // otherwise we will not see the error. + cfg.QueueSettings.Enabled = false + cfg.ClientConfig = configgrpc.ClientConfig{ + Endpoint: ln.Addr().String(), + TLSSetting: configtls.ClientConfig{ + Insecure: true, + }, + // Need to wait for every request blocking until either request timeouts or succeed. 
+ // Do not rely on external retry logic here, if that is intended set InitialInterval to 100ms. + WaitForReady: true, + } + cfg.Arrow.MaxStreamLifetime = 100 * time.Second + set := exportertest.NewNopCreateSettings() + exp, err := factory.CreateTracesExporter(context.Background(), set, cfg) + require.NoError(t, err) + require.NotNil(t, exp) + defer func() { + assert.NoError(t, exp.Shutdown(context.Background())) + }() + + host := componenttest.NewNopHost() + + assert.NoError(t, exp.Start(context.Background(), host)) + + // A trace with 2 spans. + td := testdata.GenerateTraces(2) + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + assert.Error(t, exp.ConsumeTraces(ctx, td)) + assert.EqualValues(t, context.DeadlineExceeded, ctx.Err()) + cancel() + + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second) + assert.Error(t, exp.ConsumeTraces(ctx, td)) + assert.EqualValues(t, context.DeadlineExceeded, ctx.Err()) + cancel() + + startServerAndMakeRequest(t, exp, td, ln) + + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second) + assert.Error(t, exp.ConsumeTraces(ctx, td)) + assert.EqualValues(t, context.DeadlineExceeded, ctx.Err()) + cancel() + + // First call to startServerAndMakeRequest closed the connection. There is a race condition here that the + // port may be reused, if this gets flaky rethink what to do. + ln, err = net.Listen("tcp", ln.Addr().String()) + require.NoError(t, err, "Failed to find an available address to run the gRPC server: %v", err) + startServerAndMakeRequest(t, exp, td, ln) + + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second) + assert.Error(t, exp.ConsumeTraces(ctx, td)) + assert.EqualValues(t, context.DeadlineExceeded, ctx.Err()) + cancel() +} + +func TestSendTraceDataServerStartWhileRequest(t *testing.T) { + // Find the addr, but don't start the server. + ln, err := net.Listen("tcp", "localhost:") + require.NoError(t, err, "Failed to find an available address to run the gRPC server: %v", err) + + // Start an OTel-Arrow exporter and point to the receiver. + factory := NewFactory() + cfg := factory.CreateDefaultConfig().(*Config) + cfg.ClientConfig = configgrpc.ClientConfig{ + Endpoint: ln.Addr().String(), + TLSSetting: configtls.ClientConfig{ + Insecure: true, + }, + } + cfg.Arrow.MaxStreamLifetime = 100 * time.Second + set := exportertest.NewNopCreateSettings() + exp, err := factory.CreateTracesExporter(context.Background(), set, cfg) + require.NoError(t, err) + require.NotNil(t, exp) + defer func() { + assert.NoError(t, exp.Shutdown(context.Background())) + }() + + host := componenttest.NewNopHost() + + assert.NoError(t, exp.Start(context.Background(), host)) + + // A trace with 2 spans. + td := testdata.GenerateTraces(2) + done := make(chan bool, 1) + defer close(done) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + go func() { + assert.NoError(t, exp.ConsumeTraces(ctx, td)) + done <- true + }() + + time.Sleep(2 * time.Second) + rcv, _ := otelArrowTracesReceiverOnGRPCServer(ln, false) + rcv.start() + defer rcv.srv.GracefulStop() + // Wait until one of the conditions below triggers. 
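+	// Either the in-flight export completes once the late-started server is
+	// serving, or the 10s context expires and the test fails.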
+ select { + case <-ctx.Done(): + t.Fail() + case <-done: + assert.NoError(t, ctx.Err()) + } + cancel() +} + +func TestSendTracesOnResourceExhaustion(t *testing.T) { + ln, err := net.Listen("tcp", "localhost:") + require.NoError(t, err) + rcv, _ := otelArrowTracesReceiverOnGRPCServer(ln, false) + rcv.setExportError(status.Error(codes.ResourceExhausted, "resource exhausted")) + rcv.start() + defer rcv.srv.GracefulStop() + + factory := NewFactory() + cfg := factory.CreateDefaultConfig().(*Config) + cfg.RetryConfig.InitialInterval = 0 + cfg.ClientConfig = configgrpc.ClientConfig{ + Endpoint: ln.Addr().String(), + TLSSetting: configtls.ClientConfig{ + Insecure: true, + }, + } + cfg.Arrow.MaxStreamLifetime = 100 * time.Second + set := exportertest.NewNopCreateSettings() + exp, err := factory.CreateTracesExporter(context.Background(), set, cfg) + require.NoError(t, err) + require.NotNil(t, exp) + + defer func() { + assert.NoError(t, exp.Shutdown(context.Background())) + }() + + host := componenttest.NewNopHost() + assert.NoError(t, exp.Start(context.Background(), host)) + + assert.EqualValues(t, 0, rcv.requestCount.Load()) + + td := ptrace.NewTraces() + assert.NoError(t, exp.ConsumeTraces(context.Background(), td)) + + assert.Never(t, func() bool { + return rcv.requestCount.Load() > 1 + }, 1*time.Second, 5*time.Millisecond, "Should not retry if RetryInfo is not included into status details by the server.") + + rcv.requestCount.Swap(0) + + st := status.New(codes.ResourceExhausted, "resource exhausted") + st, _ = st.WithDetails(&errdetails.RetryInfo{ + RetryDelay: durationpb.New(100 * time.Millisecond), + }) + rcv.setExportError(st.Err()) + + assert.NoError(t, exp.ConsumeTraces(context.Background(), td)) + + assert.Eventually(t, func() bool { + return rcv.requestCount.Load() > 1 + }, 10*time.Second, 5*time.Millisecond, "Should retry if RetryInfo is included into status details by the server.") +} + +func startServerAndMakeRequest(t *testing.T, exp exporter.Traces, td ptrace.Traces, ln net.Listener) { + rcv, _ := otelArrowTracesReceiverOnGRPCServer(ln, false) + rcv.start() + defer rcv.srv.GracefulStop() + // Ensure that initially there is no data in the receiver. + assert.EqualValues(t, 0, rcv.requestCount.Load()) + + // Clone the request and store as expected. + expectedData := ptrace.NewTraces() + td.CopyTo(expectedData) + + // Resend the request, this should succeed. + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + assert.NoError(t, exp.ConsumeTraces(ctx, td)) + cancel() + + // Wait until it is received. + assert.Eventually(t, func() bool { + return rcv.requestCount.Load() > 0 + }, 10*time.Second, 5*time.Millisecond) + + // Verify received span. + assert.EqualValues(t, 2, rcv.totalItems.Load()) + assert.EqualValues(t, expectedData, rcv.getLastRequest()) +} + +func TestSendLogData(t *testing.T) { + // Start an OTel-Arrow receiver. + ln, err := net.Listen("tcp", "localhost:") + require.NoError(t, err, "Failed to find an available address to run the gRPC server: %v", err) + rcv := otelArrowLogsReceiverOnGRPCServer(ln) + // Also closes the connection. + defer rcv.srv.GracefulStop() + + // Start an OTel-Arrow exporter and point to the receiver. + factory := NewFactory() + cfg := factory.CreateDefaultConfig().(*Config) + // Disable queuing to ensure that we execute the request when calling ConsumeLogs + // otherwise we will not see any errors. 
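+	// The InvalidArgument and partial-success cases below rely on this
+	// synchronous path to observe the receiver's response directly.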
+ cfg.QueueSettings.Enabled = false + cfg.ClientConfig = configgrpc.ClientConfig{ + Endpoint: ln.Addr().String(), + TLSSetting: configtls.ClientConfig{ + Insecure: true, + }, + } + cfg.Arrow.MaxStreamLifetime = 100 * time.Second + set := exportertest.NewNopCreateSettings() + set.BuildInfo.Description = "Collector" + set.BuildInfo.Version = "1.2.3test" + exp, err := factory.CreateLogsExporter(context.Background(), set, cfg) + require.NoError(t, err) + require.NotNil(t, exp) + defer func() { + assert.NoError(t, exp.Shutdown(context.Background())) + }() + + host := componenttest.NewNopHost() + + assert.NoError(t, exp.Start(context.Background(), host)) + + // Ensure that initially there is no data in the receiver. + assert.EqualValues(t, 0, rcv.requestCount.Load()) + + // Send empty request. + ld := plog.NewLogs() + assert.NoError(t, exp.ConsumeLogs(context.Background(), ld)) + + // Wait until it is received. + assert.Eventually(t, func() bool { + return rcv.requestCount.Load() > 0 + }, 10*time.Second, 5*time.Millisecond) + + // Ensure it was received empty. + assert.EqualValues(t, 0, rcv.totalItems.Load()) + + // A request with 2 log entries. + ld = testdata.GenerateLogs(2) + + err = exp.ConsumeLogs(context.Background(), ld) + assert.NoError(t, err) + + // Wait until it is received. + assert.Eventually(t, func() bool { + return rcv.requestCount.Load() > 1 + }, 10*time.Second, 5*time.Millisecond) + + // Verify received logs. + assert.EqualValues(t, 2, rcv.requestCount.Load()) + assert.EqualValues(t, 2, rcv.totalItems.Load()) + assert.EqualValues(t, ld, rcv.getLastRequest()) + + md := rcv.getMetadata() + require.Equal(t, len(md.Get("User-Agent")), 1) + require.Contains(t, md.Get("User-Agent")[0], "Collector/1.2.3test") + + st := status.New(codes.InvalidArgument, "Invalid argument") + rcv.setExportError(st.Err()) + + // A request with 2 log entries. + ld = testdata.GenerateLogs(2) + + err = exp.ConsumeLogs(context.Background(), ld) + assert.Error(t, err) + + rcv.setExportError(nil) + + // Return partial success + rcv.setExportResponse(func() plogotlp.ExportResponse { + response := plogotlp.NewExportResponse() + partialSuccess := response.PartialSuccess() + partialSuccess.SetErrorMessage("Some log records were not ingested") + partialSuccess.SetRejectedLogRecords(1) + + return response + }) + + // A request with 2 log entries. + ld = testdata.GenerateLogs(2) + + err = exp.ConsumeLogs(context.Background(), ld) + assert.NoError(t, err) +} + +// TestSendArrowTracesNotSupported tests a successful OTel-Arrow export w/ +// and without Arrow, w/ WaitForReady and without. +func TestSendArrowTracesNotSupported(t *testing.T) { + for _, waitForReady := range []bool{true, false} { + for _, available := range []bool{true, false} { + t.Run(fmt.Sprintf("waitForReady=%v available=%v", waitForReady, available), + func(t *testing.T) { testSendArrowTraces(t, waitForReady, available) }) + } + } +} + +func testSendArrowTraces(t *testing.T, clientWaitForReady, streamServiceAvailable bool) { + // Start an OTel-Arrow receiver. + ln, err := net.Listen("tcp", "127.0.0.1:") + require.NoError(t, err, "Failed to find an available address to run the gRPC server: %v", err) + + // Start an OTel-Arrow exporter and point to the receiver. 
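+	// Arrow is enabled on the exporter below; whether the mock server also
+	// registers the Arrow stream service depends on streamServiceAvailable,
+	// exercising the fallback to standard OTLP when it does not.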
+ factory := NewFactory() + authID := component.NewID(component.MustNewType("testauth")) + expectedHeader := []string{"arrow-ftw"} + cfg := factory.CreateDefaultConfig().(*Config) + cfg.ClientConfig = configgrpc.ClientConfig{ + Endpoint: ln.Addr().String(), + TLSSetting: configtls.ClientConfig{ + Insecure: true, + }, + WaitForReady: clientWaitForReady, + Headers: map[string]configopaque.String{ + "header": configopaque.String(expectedHeader[0]), + }, + Auth: &configauth.Authentication{ + AuthenticatorID: authID, + }, + } + // Arrow client is enabled, but the server doesn't support it. + cfg.Arrow = ArrowConfig{ + NumStreams: 1, + MaxStreamLifetime: 100 * time.Second, + } + + set := exportertest.NewNopCreateSettings() + set.TelemetrySettings.Logger = zaptest.NewLogger(t) + exp, err := factory.CreateTracesExporter(context.Background(), set, cfg) + require.NoError(t, err) + require.NotNil(t, exp) + + defer func() { + assert.NoError(t, exp.Shutdown(context.Background())) + }() + + type isUserCall struct{} + + host := newHostWithExtensions( + map[component.ID]component.Component{ + authID: newTestAuthExtension(t, func(ctx context.Context) map[string]string { + if ctx.Value(isUserCall{}) == nil { + return nil + } + return map[string]string{ + "callerid": "arrow", + } + }), + }, + ) + assert.NoError(t, exp.Start(context.Background(), host)) + + rcv, _ := otelArrowTracesReceiverOnGRPCServer(ln, false) + if streamServiceAvailable { + rcv.startStreamMockArrowTraces(t, okStatusFor) + } + + // Delay the server start, slightly. + go func() { + time.Sleep(100 * time.Millisecond) + rcv.start() + }() + + // Send two trace items. + td := testdata.GenerateTraces(2) + + // Set the context key indicating this is per-request state, + // so the auth extension returns data. + err = exp.ConsumeTraces(context.WithValue(context.Background(), isUserCall{}, true), td) + assert.NoError(t, err) + + // Wait until it is received. + assert.Eventually(t, func() bool { + return rcv.requestCount.Load() > 0 + }, 10*time.Second, 5*time.Millisecond) + + // Verify two items, one request received. + assert.EqualValues(t, int32(2), rcv.totalItems.Load()) + assert.EqualValues(t, int32(1), rcv.requestCount.Load()) + assert.EqualValues(t, td, rcv.getLastRequest()) + + // Expect the correct metadata, with or without arrow. + md := rcv.getMetadata() + require.EqualValues(t, []string{"arrow"}, md.Get("callerid")) + require.EqualValues(t, expectedHeader, md.Get("header")) + + rcv.srv.GracefulStop() +} + +func okStatusFor(id int64) *arrowpb.BatchStatus { + return &arrowpb.BatchStatus{ + BatchId: id, + StatusCode: arrowpb.StatusCode_OK, + } +} + +func failedStatusFor(id int64) *arrowpb.BatchStatus { + return &arrowpb.BatchStatus{ + BatchId: id, + StatusCode: arrowpb.StatusCode_INVALID_ARGUMENT, + StatusMessage: "test failed", + } +} + +type anyStreamServer interface { + Send(*arrowpb.BatchStatus) error + Recv() (*arrowpb.BatchArrowRecords, error) + grpc.ServerStream +} + +func (r *mockTracesReceiver) startStreamMockArrowTraces(t *testing.T, statusFor func(int64) *arrowpb.BatchStatus) { + ctrl := gomock.NewController(t) + + doer := func(server anyStreamServer) error { + consumer := arrowRecord.NewConsumer() + var hdrs []hpack.HeaderField + hdrsDecoder := hpack.NewDecoder(4096, func(hdr hpack.HeaderField) { + hdrs = append(hdrs, hdr) + }) + for { + records, err := server.Recv() + if status, ok := status.FromError(err); ok && status.Code() == codes.Canceled { + break + } + if err != nil { + // No errors are allowed, except EOF. 
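+				// io.EOF means the client closed its side of the stream, which ends
+				// this mock service loop cleanly.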
+ require.Equal(t, io.EOF, err) + break + } + + got, err := consumer.TracesFrom(records) + require.NoError(t, err) + + // Reset and parse headers + hdrs = nil + _, err = hdrsDecoder.Write(records.Headers) + require.NoError(t, err) + md, ok := metadata.FromIncomingContext(server.Context()) + require.True(t, ok) + + for _, hf := range hdrs { + md[hf.Name] = append(md[hf.Name], hf.Value) + } + + // Place the metadata into the context, where + // the test framework (independent of Arrow) + // receives it. + ctx := metadata.NewIncomingContext(context.Background(), md) + + for _, traces := range got { + _, err := r.Export(ctx, ptraceotlp.NewExportRequestFromTraces(traces)) + require.NoError(t, err) + } + require.NoError(t, server.Send(statusFor(records.BatchId))) + } + return nil + } + + type singleBinding struct { + arrowpb.UnsafeArrowTracesServiceServer + *arrowpbMock.MockArrowTracesServiceServer + } + svc := arrowpbMock.NewMockArrowTracesServiceServer(ctrl) + + arrowpb.RegisterArrowTracesServiceServer(r.srv, singleBinding{ + MockArrowTracesServiceServer: svc, + }) + svc.EXPECT().ArrowTraces(gomock.Any()).Times(1).DoAndReturn(doer) + +} + +func TestSendArrowFailedTraces(t *testing.T) { + // Start an OTel-Arrow receiver. + ln, err := net.Listen("tcp", "127.0.0.1:") + require.NoError(t, err, "Failed to find an available address to run the gRPC server: %v", err) + + // Start an OTel-Arrow exporter and point to the receiver. + factory := NewFactory() + cfg := factory.CreateDefaultConfig().(*Config) + cfg.ClientConfig = configgrpc.ClientConfig{ + Endpoint: ln.Addr().String(), + TLSSetting: configtls.ClientConfig{ + Insecure: true, + }, + WaitForReady: true, + } + // Arrow client is enabled, but the server doesn't support it. + cfg.Arrow = ArrowConfig{ + NumStreams: 1, + MaxStreamLifetime: 100 * time.Second, + } + cfg.QueueSettings.Enabled = false + + set := exportertest.NewNopCreateSettings() + set.TelemetrySettings.Logger = zaptest.NewLogger(t) + exp, err := factory.CreateTracesExporter(context.Background(), set, cfg) + require.NoError(t, err) + require.NotNil(t, exp) + + defer func() { + assert.NoError(t, exp.Shutdown(context.Background())) + }() + + host := componenttest.NewNopHost() + assert.NoError(t, exp.Start(context.Background(), host)) + + rcv, _ := otelArrowTracesReceiverOnGRPCServer(ln, false) + rcv.startStreamMockArrowTraces(t, failedStatusFor) + + // Delay the server start, slightly. + go func() { + time.Sleep(100 * time.Millisecond) + rcv.start() + }() + + // Send two trace items. + td := testdata.GenerateTraces(2) + err = exp.ConsumeTraces(context.Background(), td) + assert.Error(t, err) + assert.Contains(t, err.Error(), "test failed") + + // Wait until it is received. + assert.Eventually(t, func() bool { + return rcv.requestCount.Load() > 0 + }, 10*time.Second, 5*time.Millisecond) + + // Verify two items, one request received. + assert.EqualValues(t, int32(2), rcv.totalItems.Load()) + assert.EqualValues(t, int32(1), rcv.requestCount.Load()) + assert.EqualValues(t, td, rcv.getLastRequest()) + + rcv.srv.GracefulStop() +} + +func TestUserDialOptions(t *testing.T) { + // Start an OTel-Arrow receiver. + ln, err := net.Listen("tcp", "127.0.0.1:") + require.NoError(t, err, "Failed to find an available address to run the gRPC server: %v", err) + + // Start an OTel-Arrow exporter and point to the receiver. 
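+	// The user-supplied dial option below replaces the exporter's default
+	// user agent; the assertions at the end verify the override took effect.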
+ factory := NewFactory() + cfg := factory.CreateDefaultConfig().(*Config) + cfg.ClientConfig = configgrpc.ClientConfig{ + Endpoint: ln.Addr().String(), + TLSSetting: configtls.ClientConfig{ + Insecure: true, + }, + WaitForReady: true, + } + cfg.Arrow.Disabled = true + cfg.QueueSettings.Enabled = false + + const testAgent = "test-user-agent (release=:+1:)" + + // This overrides the default provided in otelArrow.go + cfg.UserDialOptions = []grpc.DialOption{ + grpc.WithUserAgent(testAgent), + } + + set := exportertest.NewNopCreateSettings() + set.TelemetrySettings.Logger = zaptest.NewLogger(t) + exp, err := factory.CreateTracesExporter(context.Background(), set, cfg) + require.NoError(t, err) + require.NotNil(t, exp) + + defer func() { + assert.NoError(t, exp.Shutdown(context.Background())) + }() + + host := componenttest.NewNopHost() + assert.NoError(t, exp.Start(context.Background(), host)) + + td := testdata.GenerateTraces(2) + + rcv, _ := otelArrowTracesReceiverOnGRPCServer(ln, false) + rcv.start() + defer rcv.srv.GracefulStop() + + err = exp.ConsumeTraces(context.Background(), td) + assert.NoError(t, err) + + require.Equal(t, len(rcv.getMetadata().Get("User-Agent")), 1) + require.Contains(t, rcv.getMetadata().Get("User-Agent")[0], testAgent) +} diff --git a/exporter/otelarrowexporter/testdata/config.yaml b/exporter/otelarrowexporter/testdata/config.yaml index 46134951f462..db9e8016ce0e 100644 --- a/exporter/otelarrowexporter/testdata/config.yaml +++ b/exporter/otelarrowexporter/testdata/config.yaml @@ -30,3 +30,4 @@ arrow: disabled: false max_stream_lifetime: 2h payload_compression: "zstd" + prioritizer: leastloaded8 From a133a8efefbe34dd45d8d4c8473ebbd75f4bdcc3 Mon Sep 17 00:00:00 2001 From: Dominik Rosiek <58699848+sumo-drosiek@users.noreply.github.com> Date: Mon, 13 May 2024 10:05:36 +0200 Subject: [PATCH 25/55] [exporter/sumologic] change logs behavior (#32939) **Description:** * set OTLP as default format * add support for OTLP format * do not support metadata attributes * do not support source headers **Link to tracking Issue:** #32315 **Testing:** * unit tests **Documentation:** * inline comments * readme --------- Signed-off-by: Dominik Rosiek --- .chloggen/drosiek-exporter-logs.yaml | 31 + exporter/sumologicexporter/README.md | 43 +- exporter/sumologicexporter/compress.go | 77 -- exporter/sumologicexporter/compress_test.go | 142 --- exporter/sumologicexporter/config.go | 36 +- exporter/sumologicexporter/config_test.go | 99 +- exporter/sumologicexporter/exporter.go | 113 +-- exporter/sumologicexporter/exporter_test.go | 275 ++++-- exporter/sumologicexporter/factory.go | 2 - exporter/sumologicexporter/factory_test.go | 13 +- exporter/sumologicexporter/go.mod | 2 +- exporter/sumologicexporter/sender.go | 205 +++-- exporter/sumologicexporter/sender_test.go | 941 ++++++++++++++------ 13 files changed, 1117 insertions(+), 862 deletions(-) create mode 100644 .chloggen/drosiek-exporter-logs.yaml delete mode 100644 exporter/sumologicexporter/compress.go delete mode 100644 exporter/sumologicexporter/compress_test.go diff --git a/.chloggen/drosiek-exporter-logs.yaml b/.chloggen/drosiek-exporter-logs.yaml new file mode 100644 index 000000000000..c0181af0e02d --- /dev/null +++ b/.chloggen/drosiek-exporter-logs.yaml @@ -0,0 +1,31 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: breaking + +# The name of the component, or a single word describing the area of concern, (e.g. 
filelogreceiver) +component: sumologicexporter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: change logs behavior + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [31479] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: | + * set OTLP as default format + * add support for OTLP format + * do not support metadata attributes + * do not support source headers + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/exporter/sumologicexporter/README.md b/exporter/sumologicexporter/README.md index c678df2c3213..39267e26e61e 100644 --- a/exporter/sumologicexporter/README.md +++ b/exporter/sumologicexporter/README.md @@ -18,7 +18,7 @@ For some time we have been developing the [new Sumo Logic exporter](https://github.com/SumoLogic/sumologic-otel-collector/tree/main/pkg/exporter/sumologicexporter#sumo-logic-exporter) and now we are in the process of moving it into this repository. -The following options are deprecated for logs and already do not work for metrics: +The following options are no longer supported: - `metric_format: {carbon2, graphite}` - `metadata_attributes: []` @@ -30,7 +30,7 @@ The following options are deprecated for logs and already do not work for metric After the new exporter will be moved to this repository: - `carbon2` and `graphite` are no longer supported and `prometheus` or `otlp` format should be used -- all resource level attributes are going to be treated (are treated for metrics) as `metadata_attributes`. You can use [Group by Attributes processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/groupbyattrsprocessor) to move attributes from record level to resource level. For example: +- all resource level attributes are treated as `metadata_attributes` so this option is no longer supported. You can use [Group by Attributes processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/groupbyattrsprocessor) to move attributes from record level to resource level. For example: ```yaml # before switch to new collector @@ -45,7 +45,7 @@ After the new exporter will be moved to this repository: - my_attribute ``` -- Source templates (`source_category`, `source_name` and `source_host`) are going to be removed from the exporter and sources may be set using `_sourceCategory`, `sourceName` or `_sourceHost` resource attributes. This feature has been already disabled for metrics. We recommend to use [Transform Processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/transformprocessor/). For example: +- Source templates (`source_category`, `source_name` and `source_host`) are no longer supported. 
We recommend to use [Transform Processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/transformprocessor/). For example: ```yaml # before switch to new collector @@ -88,12 +88,12 @@ exporters: # List of regexes for attributes which should be send as metadata # default = [] # - # This option is deprecated: + # This option is unsupported: # https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/sumologicexporter#migration-to-new-architecture metadata_attributes: [] - # format to use when sending logs to Sumo Logic, default = json, - log_format: {json, text} + # format to use when sending logs to Sumo Logic, default = otlp, + log_format: {otlp, json, text} # format to use when sending metrics to Sumo Logic, default = otlp, # NOTE: only `otlp` is supported when used with sumologicextension @@ -112,7 +112,7 @@ exporters: # Please regfer to Source temmplates for formatting explanation: # https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/sumologicexporter#source-templates # - # This option is deprecated: + # This option is unsupported: # https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/sumologicexporter#migration-to-new-architecture graphite_template: