diff --git a/receiver/postgresqlreceiver/documentation.md b/receiver/postgresqlreceiver/documentation.md index bc0c448645e3..0bf06a9c41d7 100644 --- a/receiver/postgresqlreceiver/documentation.md +++ b/receiver/postgresqlreceiver/documentation.md @@ -20,12 +20,6 @@ The number of backends. | ---- | ----------- | ---------- | ----------------------- | --------- | | 1 | Sum | Int | Cumulative | false | -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| database | The name of the database. | Any Str | - ### postgresql.bgwriter.buffers.allocated Number of buffers allocated. @@ -96,8 +90,6 @@ The number of blocks read. | Name | Description | Values | | ---- | ----------- | ------ | -| database | The name of the database. | Any Str | -| table | The schema name followed by the table name. | Any Str | | source | The block read source type. | Str: ``heap_read``, ``heap_hit``, ``idx_read``, ``idx_hit``, ``toast_read``, ``toast_hit``, ``tidx_read``, ``tidx_hit`` | ### postgresql.commits @@ -108,12 +100,6 @@ The number of commits. | ---- | ----------- | ---------- | ----------------------- | --------- | | 1 | Sum | Int | Cumulative | true | -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| database | The name of the database. | Any Str | - ### postgresql.connection.max Configured maximum number of client connections allowed @@ -138,12 +124,6 @@ The database disk usage. | ---- | ----------- | ---------- | ----------------------- | --------- | | By | Sum | Int | Cumulative | false | -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| database | The name of the database. | Any Str | - ### postgresql.index.scans The number of index scans on a table. @@ -172,8 +152,6 @@ The number of db row operations. | Name | Description | Values | | ---- | ----------- | ------ | -| database | The name of the database. | Any Str | -| table | The schema name followed by the table name. | Any Str | | operation | The database operation. | Str: ``ins``, ``upd``, ``del``, ``hot_upd`` | ### postgresql.replication.data_delay @@ -198,12 +176,6 @@ The number of rollbacks. | ---- | ----------- | ---------- | ----------------------- | --------- | | 1 | Sum | Int | Cumulative | true | -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| database | The name of the database. | Any Str | - ### postgresql.rows The number of rows in the database. @@ -216,8 +188,6 @@ The number of rows in the database. | Name | Description | Values | | ---- | ----------- | ------ | -| database | The name of the database. | Any Str | -| table | The schema name followed by the table name. | Any Str | | state | The tuple (row) state. | Str: ``dead``, ``live`` | ### postgresql.table.count @@ -291,12 +261,6 @@ The number of deadlocks. | ---- | ----------- | ---------- | ----------------------- | --------- | | {deadlock} | Sum | Int | Cumulative | true | -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| database | The name of the database. 
| Any Str | - ## Resource Attributes | Name | Description | Values | Enabled | diff --git a/receiver/postgresqlreceiver/internal/metadata/custom.go b/receiver/postgresqlreceiver/internal/metadata/custom.go deleted file mode 100644 index ec411e883695..000000000000 --- a/receiver/postgresqlreceiver/internal/metadata/custom.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package metadata // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver/internal/metadata" - -import "go.opentelemetry.io/collector/pdata/pcommon" - -// RecordPostgresqlDbSizeDataPointWithoutDatabase adds a data point to postgresql.db_size metric without a database metric attribute -func (mb *MetricsBuilder) RecordPostgresqlDbSizeDataPointWithoutDatabase(ts pcommon.Timestamp, val int64) { - mb.metricPostgresqlDbSize.recordDatapointWithoutDatabase(mb.startTime, ts, val) -} - -func (m *metricPostgresqlDbSize) recordDatapointWithoutDatabase(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// RecordPostgresqlBackendsDataPointWithoutDatabase adds a data point to postgresql.backends metric. -func (mb *MetricsBuilder) RecordPostgresqlBackendsDataPointWithoutDatabase(ts pcommon.Timestamp, val int64) { - mb.metricPostgresqlBackends.recordDatapointWithoutDatabase(mb.startTime, ts, val) -} - -func (m *metricPostgresqlBackends) recordDatapointWithoutDatabase(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// RecordPostgresqlBlocksReadDataPointWithoutDatabaseAndTable adds a data point to postgresql.blocks_read metric. 
-func (mb *MetricsBuilder) RecordPostgresqlBlocksReadDataPointWithoutDatabaseAndTable(ts pcommon.Timestamp, val int64, sourceAttributeValue AttributeSource) { - mb.metricPostgresqlBlocksRead.recordDatapointWithoutDatabaseAndTable(mb.startTime, ts, val, sourceAttributeValue.String()) -} - -func (m *metricPostgresqlBlocksRead) recordDatapointWithoutDatabaseAndTable(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sourceAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("source", sourceAttributeValue) -} - -// RecordPostgresqlCommitsDataPointWithoutDatabase adds a data point to postgresql.commits metric without the database metric attribute -func (mb *MetricsBuilder) RecordPostgresqlCommitsDataPointWithoutDatabase(ts pcommon.Timestamp, val int64) { - mb.metricPostgresqlCommits.recordDatapointWithoutDatabase(mb.startTime, ts, val) -} - -func (m *metricPostgresqlCommits) recordDatapointWithoutDatabase(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// RecordPostgresqlRollbacksDataPointWithoutDatabase adds a data point to postgresql.commits metric without the database metric attribute -func (mb *MetricsBuilder) RecordPostgresqlRollbacksDataPointWithoutDatabase(ts pcommon.Timestamp, val int64) { - mb.metricPostgresqlRollbacks.recordDatapointWithoutDatabase(mb.startTime, ts, val) -} - -func (m *metricPostgresqlRollbacks) recordDatapointWithoutDatabase(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// RecordPostgresqlDeadlocksDataPointWithoutDatabase adds a data point to postgresql.deadlocks metric without the database metric attribute. -func (mb *MetricsBuilder) RecordPostgresqlDeadlocksDataPointWithoutDatabase(ts pcommon.Timestamp, val int64) { - mb.metricPostgresqlDeadlocks.recordDatapointWithoutDatabase(mb.startTime, ts, val) -} - -func (m *metricPostgresqlDeadlocks) recordDatapointWithoutDatabase(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) -} - -// RecordPostgresqlRowsDataPointWithoutDatabaseAndTable adds a data point to postgresql.rows metric without the database or table metric attribute. 
-func (mb *MetricsBuilder) RecordPostgresqlRowsDataPointWithoutDatabaseAndTable(ts pcommon.Timestamp, val int64, stateAttributeValue AttributeState) { - mb.metricPostgresqlRows.recordDatapointWithoutDatabaseAndTable(mb.startTime, ts, val, stateAttributeValue.String()) -} - -func (m *metricPostgresqlRows) recordDatapointWithoutDatabaseAndTable(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, stateAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("state", stateAttributeValue) -} - -// RecordPostgresqlOperationsDataPointWithoutDatabaseAndTable adds a data point to postgresql.operations metric without the database or table metric attribute -func (mb *MetricsBuilder) RecordPostgresqlOperationsDataPointWithoutDatabaseAndTable(ts pcommon.Timestamp, val int64, operationAttributeValue AttributeOperation) { - mb.metricPostgresqlOperations.recordDatapointWithoutDatabaseAndTable(mb.startTime, ts, val, operationAttributeValue.String()) -} - -func (m *metricPostgresqlOperations) recordDatapointWithoutDatabaseAndTable(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, operationAttributeValue string) { - if !m.config.Enabled { - return - } - dp := m.data.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("operation", operationAttributeValue) -} diff --git a/receiver/postgresqlreceiver/internal/metadata/generated_metrics.go b/receiver/postgresqlreceiver/internal/metadata/generated_metrics.go index b9a1aa84fdce..33111225a22c 100644 --- a/receiver/postgresqlreceiver/internal/metadata/generated_metrics.go +++ b/receiver/postgresqlreceiver/internal/metadata/generated_metrics.go @@ -251,10 +251,9 @@ func (m *metricPostgresqlBackends) init() { m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricPostgresqlBackends) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string) { +func (m *metricPostgresqlBackends) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } @@ -262,7 +261,6 @@ func (m *metricPostgresqlBackends) recordDataPoint(start pcommon.Timestamp, ts p dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) - dp.Attributes().PutStr("database", databaseAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
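The hunks above delete the hand-written `*WithoutDatabase*` helpers and regenerate the recorders so that `database` (and, where applicable, `table`) are no longer stamped onto each data point. A minimal pdata sketch of the resulting data shape — not the generated code itself, assuming only the standard `pdata` API and the `postgresql.database.name` resource attribute that appears in this receiver's testdata:

```go
// Illustrative sketch: after this change, postgresql.backends carries no
// "database" data-point attribute; the database name lives on the resource.
package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	md := pmetric.NewMetrics()
	rm := md.ResourceMetrics().AppendEmpty()
	// The database name is now a resource attribute, set once per database.
	rm.Resource().Attributes().PutStr("postgresql.database.name", "otel")

	m := rm.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()
	m.SetName("postgresql.backends")
	sum := m.SetEmptySum()
	sum.SetIsMonotonic(false)
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)

	dp := sum.DataPoints().AppendEmpty()
	dp.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	dp.SetIntValue(3)
	// Note: no dp.Attributes().PutStr("database", ...) call anymore.

	fmt.Println(md.MetricCount()) // 1
}
```

Moving the name from every point to the resource also explains the removed `EnsureCapacity(m.capacity)` calls: these sums now emit a single data point per database resource rather than one per attribute combination.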
@@ -568,7 +566,7 @@ func (m *metricPostgresqlBlocksRead) init() { m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricPostgresqlBlocksRead) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string, tableAttributeValue string, sourceAttributeValue string) { +func (m *metricPostgresqlBlocksRead) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sourceAttributeValue string) { if !m.config.Enabled { return } @@ -576,8 +574,6 @@ func (m *metricPostgresqlBlocksRead) recordDataPoint(start pcommon.Timestamp, ts dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) - dp.Attributes().PutStr("database", databaseAttributeValue) - dp.Attributes().PutStr("table", tableAttributeValue) dp.Attributes().PutStr("source", sourceAttributeValue) } @@ -620,10 +616,9 @@ func (m *metricPostgresqlCommits) init() { m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricPostgresqlCommits) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string) { +func (m *metricPostgresqlCommits) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } @@ -631,7 +626,6 @@ func (m *metricPostgresqlCommits) recordDataPoint(start pcommon.Timestamp, ts pc dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) - dp.Attributes().PutStr("database", databaseAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -773,10 +767,9 @@ func (m *metricPostgresqlDbSize) init() { m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(false) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricPostgresqlDbSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string) { +func (m *metricPostgresqlDbSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } @@ -784,7 +777,6 @@ func (m *metricPostgresqlDbSize) recordDataPoint(start pcommon.Timestamp, ts pco dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) - dp.Attributes().PutStr("database", databaseAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -826,10 +818,9 @@ func (m *metricPostgresqlDeadlocks) init() { m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricPostgresqlDeadlocks) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string) { +func (m *metricPostgresqlDeadlocks) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } @@ -837,7 +828,6 @@ func (m *metricPostgresqlDeadlocks) recordDataPoint(start pcommon.Timestamp, ts dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) - dp.Attributes().PutStr("database", databaseAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
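For callers the change is purely a narrower signature. A hedged before/after sketch using the exported names visible in this diff (`RecordPostgresqlCommitsDataPoint`, `NewResourceBuilder`, `SetPostgresqlDatabaseName`, `Emit`); `WithResource` is the usual mdatagen resource option and is an assumption here, since the diff does not show it:

```go
// Sketch only: caller-side effect of the signature changes above. The
// internal metadata import path is shown for illustration.
package postgresqlreceiver

import (
	"go.opentelemetry.io/collector/pdata/pcommon"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver/internal/metadata"
)

func recordDBStats(mb *metadata.MetricsBuilder, now pcommon.Timestamp, commits, rollbacks int64) {
	// Before: mb.RecordPostgresqlCommitsDataPoint(now, commits, "otel")
	// After: the database attribute argument is gone entirely.
	mb.RecordPostgresqlCommitsDataPoint(now, commits)
	mb.RecordPostgresqlRollbacksDataPoint(now, rollbacks)

	// The per-database identity is attached once, via the resource builder.
	rb := mb.NewResourceBuilder()
	rb.SetPostgresqlDatabaseName("otel")
	_ = mb.Emit(metadata.WithResource(rb.Emit())) // WithResource: assumed mdatagen option
}
```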
@@ -982,7 +972,7 @@ func (m *metricPostgresqlOperations) init() { m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricPostgresqlOperations) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string, tableAttributeValue string, operationAttributeValue string) { +func (m *metricPostgresqlOperations) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, operationAttributeValue string) { if !m.config.Enabled { return } @@ -990,8 +980,6 @@ func (m *metricPostgresqlOperations) recordDataPoint(start pcommon.Timestamp, ts dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) - dp.Attributes().PutStr("database", databaseAttributeValue) - dp.Attributes().PutStr("table", tableAttributeValue) dp.Attributes().PutStr("operation", operationAttributeValue) } @@ -1085,10 +1073,9 @@ func (m *metricPostgresqlRollbacks) init() { m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricPostgresqlRollbacks) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string) { +func (m *metricPostgresqlRollbacks) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.config.Enabled { return } @@ -1096,7 +1083,6 @@ func (m *metricPostgresqlRollbacks) recordDataPoint(start pcommon.Timestamp, ts dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) - dp.Attributes().PutStr("database", databaseAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1141,7 +1127,7 @@ func (m *metricPostgresqlRows) init() { m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricPostgresqlRows) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string, tableAttributeValue string, stateAttributeValue string) { +func (m *metricPostgresqlRows) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, stateAttributeValue string) { if !m.config.Enabled { return } @@ -1149,8 +1135,6 @@ func (m *metricPostgresqlRows) recordDataPoint(start pcommon.Timestamp, ts pcomm dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) - dp.Attributes().PutStr("database", databaseAttributeValue) - dp.Attributes().PutStr("table", tableAttributeValue) dp.Attributes().PutStr("state", stateAttributeValue) } @@ -1610,8 +1594,8 @@ func (mb *MetricsBuilder) Emit(rmo ...ResourceMetricsOption) pmetric.Metrics { } // RecordPostgresqlBackendsDataPoint adds a data point to postgresql.backends metric. -func (mb *MetricsBuilder) RecordPostgresqlBackendsDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string) { - mb.metricPostgresqlBackends.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue) +func (mb *MetricsBuilder) RecordPostgresqlBackendsDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricPostgresqlBackends.recordDataPoint(mb.startTime, ts, val) } // RecordPostgresqlBgwriterBuffersAllocatedDataPoint adds a data point to postgresql.bgwriter.buffers.allocated metric. @@ -1640,13 +1624,13 @@ func (mb *MetricsBuilder) RecordPostgresqlBgwriterMaxwrittenDataPoint(ts pcommon } // RecordPostgresqlBlocksReadDataPoint adds a data point to postgresql.blocks_read metric. 
-func (mb *MetricsBuilder) RecordPostgresqlBlocksReadDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string, tableAttributeValue string, sourceAttributeValue AttributeSource) { - mb.metricPostgresqlBlocksRead.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue, tableAttributeValue, sourceAttributeValue.String()) +func (mb *MetricsBuilder) RecordPostgresqlBlocksReadDataPoint(ts pcommon.Timestamp, val int64, sourceAttributeValue AttributeSource) { + mb.metricPostgresqlBlocksRead.recordDataPoint(mb.startTime, ts, val, sourceAttributeValue.String()) } // RecordPostgresqlCommitsDataPoint adds a data point to postgresql.commits metric. -func (mb *MetricsBuilder) RecordPostgresqlCommitsDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string) { - mb.metricPostgresqlCommits.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue) +func (mb *MetricsBuilder) RecordPostgresqlCommitsDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricPostgresqlCommits.recordDataPoint(mb.startTime, ts, val) } // RecordPostgresqlConnectionMaxDataPoint adds a data point to postgresql.connection.max metric. @@ -1660,13 +1644,13 @@ func (mb *MetricsBuilder) RecordPostgresqlDatabaseCountDataPoint(ts pcommon.Time } // RecordPostgresqlDbSizeDataPoint adds a data point to postgresql.db_size metric. -func (mb *MetricsBuilder) RecordPostgresqlDbSizeDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string) { - mb.metricPostgresqlDbSize.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue) +func (mb *MetricsBuilder) RecordPostgresqlDbSizeDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricPostgresqlDbSize.recordDataPoint(mb.startTime, ts, val) } // RecordPostgresqlDeadlocksDataPoint adds a data point to postgresql.deadlocks metric. -func (mb *MetricsBuilder) RecordPostgresqlDeadlocksDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string) { - mb.metricPostgresqlDeadlocks.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue) +func (mb *MetricsBuilder) RecordPostgresqlDeadlocksDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricPostgresqlDeadlocks.recordDataPoint(mb.startTime, ts, val) } // RecordPostgresqlIndexScansDataPoint adds a data point to postgresql.index.scans metric. @@ -1680,8 +1664,8 @@ func (mb *MetricsBuilder) RecordPostgresqlIndexSizeDataPoint(ts pcommon.Timestam } // RecordPostgresqlOperationsDataPoint adds a data point to postgresql.operations metric. -func (mb *MetricsBuilder) RecordPostgresqlOperationsDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string, tableAttributeValue string, operationAttributeValue AttributeOperation) { - mb.metricPostgresqlOperations.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue, tableAttributeValue, operationAttributeValue.String()) +func (mb *MetricsBuilder) RecordPostgresqlOperationsDataPoint(ts pcommon.Timestamp, val int64, operationAttributeValue AttributeOperation) { + mb.metricPostgresqlOperations.recordDataPoint(mb.startTime, ts, val, operationAttributeValue.String()) } // RecordPostgresqlReplicationDataDelayDataPoint adds a data point to postgresql.replication.data_delay metric. @@ -1690,13 +1674,13 @@ func (mb *MetricsBuilder) RecordPostgresqlReplicationDataDelayDataPoint(ts pcomm } // RecordPostgresqlRollbacksDataPoint adds a data point to postgresql.rollbacks metric. 
-func (mb *MetricsBuilder) RecordPostgresqlRollbacksDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string) { - mb.metricPostgresqlRollbacks.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue) +func (mb *MetricsBuilder) RecordPostgresqlRollbacksDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricPostgresqlRollbacks.recordDataPoint(mb.startTime, ts, val) } // RecordPostgresqlRowsDataPoint adds a data point to postgresql.rows metric. -func (mb *MetricsBuilder) RecordPostgresqlRowsDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string, tableAttributeValue string, stateAttributeValue AttributeState) { - mb.metricPostgresqlRows.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue, tableAttributeValue, stateAttributeValue.String()) +func (mb *MetricsBuilder) RecordPostgresqlRowsDataPoint(ts pcommon.Timestamp, val int64, stateAttributeValue AttributeState) { + mb.metricPostgresqlRows.recordDataPoint(mb.startTime, ts, val, stateAttributeValue.String()) } // RecordPostgresqlTableCountDataPoint adds a data point to postgresql.table.count metric. diff --git a/receiver/postgresqlreceiver/internal/metadata/generated_metrics_test.go b/receiver/postgresqlreceiver/internal/metadata/generated_metrics_test.go index b256e8e7baef..934516024fb2 100644 --- a/receiver/postgresqlreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/postgresqlreceiver/internal/metadata/generated_metrics_test.go @@ -56,7 +56,7 @@ func TestMetricsBuilder(t *testing.T) { defaultMetricsCount++ allMetricsCount++ - mb.RecordPostgresqlBackendsDataPoint(ts, 1, "database-val") + mb.RecordPostgresqlBackendsDataPoint(ts, 1) defaultMetricsCount++ allMetricsCount++ @@ -80,11 +80,11 @@ func TestMetricsBuilder(t *testing.T) { defaultMetricsCount++ allMetricsCount++ - mb.RecordPostgresqlBlocksReadDataPoint(ts, 1, "database-val", "table-val", AttributeSourceHeapRead) + mb.RecordPostgresqlBlocksReadDataPoint(ts, 1, AttributeSourceHeapRead) defaultMetricsCount++ allMetricsCount++ - mb.RecordPostgresqlCommitsDataPoint(ts, 1, "database-val") + mb.RecordPostgresqlCommitsDataPoint(ts, 1) defaultMetricsCount++ allMetricsCount++ @@ -96,10 +96,10 @@ func TestMetricsBuilder(t *testing.T) { defaultMetricsCount++ allMetricsCount++ - mb.RecordPostgresqlDbSizeDataPoint(ts, 1, "database-val") + mb.RecordPostgresqlDbSizeDataPoint(ts, 1) allMetricsCount++ - mb.RecordPostgresqlDeadlocksDataPoint(ts, 1, "database-val") + mb.RecordPostgresqlDeadlocksDataPoint(ts, 1) defaultMetricsCount++ allMetricsCount++ @@ -111,7 +111,7 @@ func TestMetricsBuilder(t *testing.T) { defaultMetricsCount++ allMetricsCount++ - mb.RecordPostgresqlOperationsDataPoint(ts, 1, "database-val", "table-val", AttributeOperationIns) + mb.RecordPostgresqlOperationsDataPoint(ts, 1, AttributeOperationIns) defaultMetricsCount++ allMetricsCount++ @@ -119,11 +119,11 @@ func TestMetricsBuilder(t *testing.T) { defaultMetricsCount++ allMetricsCount++ - mb.RecordPostgresqlRollbacksDataPoint(ts, 1, "database-val") + mb.RecordPostgresqlRollbacksDataPoint(ts, 1) defaultMetricsCount++ allMetricsCount++ - mb.RecordPostgresqlRowsDataPoint(ts, 1, "database-val", "table-val", AttributeStateDead) + mb.RecordPostgresqlRowsDataPoint(ts, 1, AttributeStateDead) defaultMetricsCount++ allMetricsCount++ @@ -185,9 +185,6 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := 
dp.Attributes().Get("database") - assert.True(t, ok) - assert.EqualValues(t, "database-val", attrVal.Str()) case "postgresql.bgwriter.buffers.allocated": assert.False(t, validatedMetrics["postgresql.bgwriter.buffers.allocated"], "Found a duplicate in the metrics slice: postgresql.bgwriter.buffers.allocated") validatedMetrics["postgresql.bgwriter.buffers.allocated"] = true @@ -281,13 +278,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("database") - assert.True(t, ok) - assert.EqualValues(t, "database-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("table") - assert.True(t, ok) - assert.EqualValues(t, "table-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("source") + attrVal, ok := dp.Attributes().Get("source") assert.True(t, ok) assert.EqualValues(t, "heap_read", attrVal.Str()) case "postgresql.commits": @@ -304,9 +295,6 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("database") - assert.True(t, ok) - assert.EqualValues(t, "database-val", attrVal.Str()) case "postgresql.connection.max": assert.False(t, validatedMetrics["postgresql.connection.max"], "Found a duplicate in the metrics slice: postgresql.connection.max") validatedMetrics["postgresql.connection.max"] = true @@ -347,9 +335,6 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("database") - assert.True(t, ok) - assert.EqualValues(t, "database-val", attrVal.Str()) case "postgresql.deadlocks": assert.False(t, validatedMetrics["postgresql.deadlocks"], "Found a duplicate in the metrics slice: postgresql.deadlocks") validatedMetrics["postgresql.deadlocks"] = true @@ -364,9 +349,6 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("database") - assert.True(t, ok) - assert.EqualValues(t, "database-val", attrVal.Str()) case "postgresql.index.scans": assert.False(t, validatedMetrics["postgresql.index.scans"], "Found a duplicate in the metrics slice: postgresql.index.scans") validatedMetrics["postgresql.index.scans"] = true @@ -407,13 +389,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("database") - assert.True(t, ok) - assert.EqualValues(t, "database-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("table") - assert.True(t, ok) - assert.EqualValues(t, "table-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("operation") + attrVal, ok := dp.Attributes().Get("operation") assert.True(t, ok) assert.EqualValues(t, "ins", attrVal.Str()) case "postgresql.replication.data_delay": @@ -445,9 +421,6 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("database") - 
assert.True(t, ok) - assert.EqualValues(t, "database-val", attrVal.Str()) case "postgresql.rows": assert.False(t, validatedMetrics["postgresql.rows"], "Found a duplicate in the metrics slice: postgresql.rows") validatedMetrics["postgresql.rows"] = true @@ -462,13 +435,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) - attrVal, ok := dp.Attributes().Get("database") - assert.True(t, ok) - assert.EqualValues(t, "database-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("table") - assert.True(t, ok) - assert.EqualValues(t, "table-val", attrVal.Str()) - attrVal, ok = dp.Attributes().Get("state") + attrVal, ok := dp.Attributes().Get("state") assert.True(t, ok) assert.EqualValues(t, "dead", attrVal.Str()) case "postgresql.table.count": diff --git a/receiver/postgresqlreceiver/metadata.yaml b/receiver/postgresqlreceiver/metadata.yaml index 43c34b098887..f27da4640262 100644 --- a/receiver/postgresqlreceiver/metadata.yaml +++ b/receiver/postgresqlreceiver/metadata.yaml @@ -46,12 +46,6 @@ attributes: - sync - write name_override: type - database: - description: The name of the database. - type: string - table: - description: The schema name followed by the table name. - type: string source: description: The block read source type. type: string @@ -83,7 +77,6 @@ attributes: metrics: postgresql.bgwriter.buffers.allocated: - attributes: [] description: Number of buffers allocated. enabled: true sum: @@ -137,7 +130,7 @@ metrics: value_type: int monotonic: true aggregation_temporality: cumulative - attributes: [database, table, source] + attributes: [source] postgresql.commits: enabled: true description: The number of commits. @@ -146,9 +139,7 @@ metrics: value_type: int monotonic: true aggregation_temporality: cumulative - attributes: [database] postgresql.database.count: - attributes: [] description: Number of user databases. enabled: true sum: @@ -164,7 +155,6 @@ metrics: value_type: int monotonic: false aggregation_temporality: cumulative - attributes: [database] postgresql.backends: enabled: true description: The number of backends. @@ -173,7 +163,6 @@ metrics: value_type: int monotonic: false aggregation_temporality: cumulative - attributes: [database] postgresql.connection.max: enabled: true description: Configured maximum number of client connections allowed @@ -188,9 +177,8 @@ metrics: value_type: int monotonic: false aggregation_temporality: cumulative - attributes: [database, table, state] + attributes: [state] postgresql.index.scans: - attributes: [] description: The number of index scans on a table. enabled: true sum: @@ -199,7 +187,6 @@ metrics: value_type: int unit: "{scans}" postgresql.index.size: - attributes: [] description: The size of the index on disk. enabled: true gauge: @@ -213,7 +200,7 @@ metrics: value_type: int monotonic: true aggregation_temporality: cumulative - attributes: [database, table, operation] + attributes: [operation] postgresql.replication.data_delay: attributes: [replication_client] description: The amount of data delayed in replication. @@ -229,7 +216,6 @@ metrics: value_type: int monotonic: true aggregation_temporality: cumulative - attributes: [database] postgresql.deadlocks: enabled: false description: The number of deadlocks. 
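The generated tests above encode the same contract: each data point keeps only its enum attribute (`source`, `operation`, `state`) and the `database`/`table` lookups are deleted. A test-helper sketch of what the new expectation amounts to — illustrative only, not part of this diff:

```go
// Sketch of the assertion pattern after this change, for a postgresql.rows
// data point recorded with AttributeStateDead.
package metadata

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func assertNoDatabaseAttr(t *testing.T, dp pmetric.NumberDataPoint) {
	// The "database" attribute must be absent from the point itself;
	// it now lives on the enclosing resource.
	_, ok := dp.Attributes().Get("database")
	assert.False(t, ok)

	// The enum attribute is still present on the point.
	attrVal, ok := dp.Attributes().Get("state")
	assert.True(t, ok)
	assert.EqualValues(t, "dead", attrVal.Str())
}
```

The same shift shows up in the metadata.yaml hunks here (`attributes: [database, table, ...]` collapsing to just the enum attribute) and, below, in the scraper and golden files, where `expected_with_resource.yaml` simply becomes `expected.yaml`.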
@@ -238,9 +224,7 @@ metrics: value_type: int monotonic: true aggregation_temporality: cumulative - attributes: [database] postgresql.table.count: - attributes: [] description: Number of user tables in a database. enabled: true sum: @@ -249,7 +233,6 @@ metrics: value_type: int unit: "{table}" postgresql.table.size: - attributes: [] description: Disk space used by a table. enabled: true unit: By @@ -258,7 +241,6 @@ metrics: monotonic: false value_type: int postgresql.table.vacuum.count: - attributes: [] description: Number of times a table has manually been vacuumed. enabled: true unit: "{vacuums}" @@ -267,7 +249,6 @@ metrics: monotonic: true value_type: int postgresql.wal.age: - attributes: [] description: Age of the oldest WAL file. extended_documentation: | This metric requires WAL to be enabled with at least one replica. diff --git a/receiver/postgresqlreceiver/scraper.go b/receiver/postgresqlreceiver/scraper.go index f1d3ad5edf47..224f2f86871b 100644 --- a/receiver/postgresqlreceiver/scraper.go +++ b/receiver/postgresqlreceiver/scraper.go @@ -157,15 +157,15 @@ func (p *postgreSQLScraper) recordDatabase(now pcommon.Timestamp, db string, r * dbName := databaseName(db) p.mb.RecordPostgresqlTableCountDataPoint(now, numTables) if activeConnections, ok := r.activityMap[dbName]; ok { - p.mb.RecordPostgresqlBackendsDataPointWithoutDatabase(now, activeConnections) + p.mb.RecordPostgresqlBackendsDataPoint(now, activeConnections) } if size, ok := r.dbSizeMap[dbName]; ok { - p.mb.RecordPostgresqlDbSizeDataPointWithoutDatabase(now, size) + p.mb.RecordPostgresqlDbSizeDataPoint(now, size) } if stats, ok := r.dbStats[dbName]; ok { - p.mb.RecordPostgresqlCommitsDataPointWithoutDatabase(now, stats.transactionCommitted) - p.mb.RecordPostgresqlRollbacksDataPointWithoutDatabase(now, stats.transactionRollback) - p.mb.RecordPostgresqlDeadlocksDataPointWithoutDatabase(now, stats.deadlocks) + p.mb.RecordPostgresqlCommitsDataPoint(now, stats.transactionCommitted) + p.mb.RecordPostgresqlRollbacksDataPoint(now, stats.transactionRollback) + p.mb.RecordPostgresqlDeadlocksDataPoint(now, stats.deadlocks) } rb := p.mb.NewResourceBuilder() rb.SetPostgresqlDatabaseName(db) @@ -184,25 +184,25 @@ func (p *postgreSQLScraper) collectTables(ctx context.Context, now pcommon.Times } for tableKey, tm := range tableMetrics { - p.mb.RecordPostgresqlRowsDataPointWithoutDatabaseAndTable(now, tm.dead, metadata.AttributeStateDead) - p.mb.RecordPostgresqlRowsDataPointWithoutDatabaseAndTable(now, tm.live, metadata.AttributeStateLive) - p.mb.RecordPostgresqlOperationsDataPointWithoutDatabaseAndTable(now, tm.inserts, metadata.AttributeOperationIns) - p.mb.RecordPostgresqlOperationsDataPointWithoutDatabaseAndTable(now, tm.del, metadata.AttributeOperationDel) - p.mb.RecordPostgresqlOperationsDataPointWithoutDatabaseAndTable(now, tm.upd, metadata.AttributeOperationUpd) - p.mb.RecordPostgresqlOperationsDataPointWithoutDatabaseAndTable(now, tm.hotUpd, metadata.AttributeOperationHotUpd) + p.mb.RecordPostgresqlRowsDataPoint(now, tm.dead, metadata.AttributeStateDead) + p.mb.RecordPostgresqlRowsDataPoint(now, tm.live, metadata.AttributeStateLive) + p.mb.RecordPostgresqlOperationsDataPoint(now, tm.inserts, metadata.AttributeOperationIns) + p.mb.RecordPostgresqlOperationsDataPoint(now, tm.del, metadata.AttributeOperationDel) + p.mb.RecordPostgresqlOperationsDataPoint(now, tm.upd, metadata.AttributeOperationUpd) + p.mb.RecordPostgresqlOperationsDataPoint(now, tm.hotUpd, metadata.AttributeOperationHotUpd) p.mb.RecordPostgresqlTableSizeDataPoint(now, 
tm.size) p.mb.RecordPostgresqlTableVacuumCountDataPoint(now, tm.vacuumCount) br, ok := blockReads[tableKey] if ok { - p.mb.RecordPostgresqlBlocksReadDataPointWithoutDatabaseAndTable(now, br.heapRead, metadata.AttributeSourceHeapRead) - p.mb.RecordPostgresqlBlocksReadDataPointWithoutDatabaseAndTable(now, br.heapHit, metadata.AttributeSourceHeapHit) - p.mb.RecordPostgresqlBlocksReadDataPointWithoutDatabaseAndTable(now, br.idxRead, metadata.AttributeSourceIdxRead) - p.mb.RecordPostgresqlBlocksReadDataPointWithoutDatabaseAndTable(now, br.idxHit, metadata.AttributeSourceIdxHit) - p.mb.RecordPostgresqlBlocksReadDataPointWithoutDatabaseAndTable(now, br.toastHit, metadata.AttributeSourceToastHit) - p.mb.RecordPostgresqlBlocksReadDataPointWithoutDatabaseAndTable(now, br.toastRead, metadata.AttributeSourceToastRead) - p.mb.RecordPostgresqlBlocksReadDataPointWithoutDatabaseAndTable(now, br.tidxRead, metadata.AttributeSourceTidxRead) - p.mb.RecordPostgresqlBlocksReadDataPointWithoutDatabaseAndTable(now, br.tidxHit, metadata.AttributeSourceTidxHit) + p.mb.RecordPostgresqlBlocksReadDataPoint(now, br.heapRead, metadata.AttributeSourceHeapRead) + p.mb.RecordPostgresqlBlocksReadDataPoint(now, br.heapHit, metadata.AttributeSourceHeapHit) + p.mb.RecordPostgresqlBlocksReadDataPoint(now, br.idxRead, metadata.AttributeSourceIdxRead) + p.mb.RecordPostgresqlBlocksReadDataPoint(now, br.idxHit, metadata.AttributeSourceIdxHit) + p.mb.RecordPostgresqlBlocksReadDataPoint(now, br.toastHit, metadata.AttributeSourceToastHit) + p.mb.RecordPostgresqlBlocksReadDataPoint(now, br.toastRead, metadata.AttributeSourceToastRead) + p.mb.RecordPostgresqlBlocksReadDataPoint(now, br.tidxRead, metadata.AttributeSourceTidxRead) + p.mb.RecordPostgresqlBlocksReadDataPoint(now, br.tidxHit, metadata.AttributeSourceTidxHit) } rb := p.mb.NewResourceBuilder() rb.SetPostgresqlDatabaseName(db) diff --git a/receiver/postgresqlreceiver/scraper_test.go b/receiver/postgresqlreceiver/scraper_test.go index a7380a4511ef..27b1040ee627 100644 --- a/receiver/postgresqlreceiver/scraper_test.go +++ b/receiver/postgresqlreceiver/scraper_test.go @@ -100,7 +100,7 @@ func TestScraperWithResourceAttributeFeatureGate(t *testing.T) { actualMetrics, err := scraper.scrape(context.Background()) require.NoError(t, err) - expectedFile := filepath.Join("testdata", "scraper", "multiple", "expected_with_resource.yaml") + expectedFile := filepath.Join("testdata", "scraper", "multiple", "expected.yaml") expectedMetrics, err := golden.ReadMetrics(expectedFile) require.NoError(t, err) @@ -119,7 +119,7 @@ func TestScraperWithResourceAttributeFeatureGateSingle(t *testing.T) { actualMetrics, err := scraper.scrape(context.Background()) require.NoError(t, err) - expectedFile := filepath.Join("testdata", "scraper", "otel", "expected_with_resource.yaml") + expectedFile := filepath.Join("testdata", "scraper", "otel", "expected.yaml") expectedMetrics, err := golden.ReadMetrics(expectedFile) require.NoError(t, err) diff --git a/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_with_resource.yaml b/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_with_resource.yaml deleted file mode 100644 index bbbeeda87d47..000000000000 --- a/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_with_resource.yaml +++ /dev/null @@ -1,1482 +0,0 @@ -resourceMetrics: - - resource: - attributes: [] - scopeMetrics: - - metrics: - - description: Number of buffers allocated. 
- name: postgresql.bgwriter.buffers.allocated - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "10" - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - isMonotonic: true - unit: '{buffers}' - - description: Number of buffers written. - name: postgresql.bgwriter.buffers.writes - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "7" - attributes: - - key: source - value: - stringValue: backend - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "8" - attributes: - - key: source - value: - stringValue: backend_fsync - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "5" - attributes: - - key: source - value: - stringValue: bgwriter - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "9" - attributes: - - key: source - value: - stringValue: checkpoints - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - isMonotonic: true - unit: '{buffers}' - - description: The number of checkpoints performed. - name: postgresql.bgwriter.checkpoint.count - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "1" - attributes: - - key: type - value: - stringValue: requested - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "2" - attributes: - - key: type - value: - stringValue: scheduled - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - isMonotonic: true - unit: '{checkpoints}' - - description: Total time spent writing and syncing files to disk by checkpoints. - name: postgresql.bgwriter.duration - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 4.23 - attributes: - - key: type - value: - stringValue: sync - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asDouble: 3.12 - attributes: - - key: type - value: - stringValue: write - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - isMonotonic: true - unit: ms - - description: Number of times the background writer stopped a cleaning scan because it had written too many buffers. - name: postgresql.bgwriter.maxwritten - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "11" - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - isMonotonic: true - unit: "1" - - description: Configured maximum number of client connections allowed - gauge: - dataPoints: - - asInt: "100" - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - name: postgresql.connection.max - unit: '{connections}' - - description: Number of user databases. - name: postgresql.database.count - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "3" - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - unit: '{databases}' - - description: The amount of data delayed in replication. - gauge: - dataPoints: - - asInt: "1024" - attributes: - - key: replication_client - value: - stringValue: unix - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - name: postgresql.replication.data_delay - unit: By - - description: Age of the oldest WAL file. 
- gauge: - dataPoints: - - asInt: "3600" - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - name: postgresql.wal.age - unit: s - - description: Time between flushing recent WAL locally and receiving notification that the standby server has completed an operation with it. - gauge: - dataPoints: - - asInt: "600" - attributes: - - key: operation - value: - stringValue: flush - - key: replication_client - value: - stringValue: unix - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "700" - attributes: - - key: operation - value: - stringValue: replay - - key: replication_client - value: - stringValue: unix - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "800" - attributes: - - key: operation - value: - stringValue: write - - key: replication_client - value: - stringValue: unix - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - name: postgresql.wal.lag - unit: s - scope: - name: otelcol/postgresqlreceiver - version: latest - - resource: - attributes: - - key: postgresql.database.name - value: - stringValue: open - scopeMetrics: - - metrics: - - description: The number of backends. - name: postgresql.backends - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "4" - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - unit: "1" - - description: The number of commits. - name: postgresql.commits - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "2" - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - isMonotonic: true - unit: "1" - - description: The database disk usage. - name: postgresql.db_size - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "5" - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - unit: By - - description: The number of deadlocks. - name: postgresql.deadlocks - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "4" - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - isMonotonic: true - unit: "{deadlock}" - - description: The number of rollbacks. - name: postgresql.rollbacks - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "3" - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - isMonotonic: true - unit: "1" - - description: Number of user tables in a database. - name: postgresql.table.count - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "2" - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - unit: '{table}' - scope: - name: otelcol/postgresqlreceiver - version: latest - - resource: - attributes: - - key: postgresql.database.name - value: - stringValue: otel - scopeMetrics: - - metrics: - - description: The number of backends. - name: postgresql.backends - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "3" - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - unit: "1" - - description: The number of commits. - name: postgresql.commits - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "1" - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - isMonotonic: true - unit: "1" - - description: The database disk usage. 
- name: postgresql.db_size - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "4" - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - unit: By - - description: The number of deadlocks. - name: postgresql.deadlocks - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "3" - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - isMonotonic: true - unit: "{deadlock}" - - description: The number of rollbacks. - name: postgresql.rollbacks - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "2" - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - isMonotonic: true - unit: "1" - - description: Number of user tables in a database. - name: postgresql.table.count - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "2" - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - unit: '{table}' - scope: - name: otelcol/postgresqlreceiver - version: latest - - resource: - attributes: - - key: postgresql.database.name - value: - stringValue: telemetry - scopeMetrics: - - metrics: - - description: The number of backends. - name: postgresql.backends - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "5" - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - unit: "1" - - description: The number of commits. - name: postgresql.commits - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "3" - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - isMonotonic: true - unit: "1" - - description: The database disk usage. - name: postgresql.db_size - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "6" - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - unit: By - - description: The number of deadlocks. - name: postgresql.deadlocks - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "5" - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - isMonotonic: true - unit: "{deadlock}" - - description: The number of rollbacks. - name: postgresql.rollbacks - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "4" - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - isMonotonic: true - unit: "1" - - description: Number of user tables in a database. - name: postgresql.table.count - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "2" - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - unit: '{table}' - scope: - name: otelcol/postgresqlreceiver - version: latest - - resource: - attributes: - - key: postgresql.database.name - value: - stringValue: open - - key: postgresql.table.name - value: - stringValue: public.table1 - scopeMetrics: - - metrics: - - description: The number of blocks read. 
- name: postgresql.blocks_read - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "21" - attributes: - - key: source - value: - stringValue: heap_hit - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "20" - attributes: - - key: source - value: - stringValue: heap_read - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "23" - attributes: - - key: source - value: - stringValue: idx_hit - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "22" - attributes: - - key: source - value: - stringValue: idx_read - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "27" - attributes: - - key: source - value: - stringValue: tidx_hit - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "26" - attributes: - - key: source - value: - stringValue: tidx_read - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "25" - attributes: - - key: source - value: - stringValue: toast_hit - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "24" - attributes: - - key: source - value: - stringValue: toast_read - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - isMonotonic: true - unit: "1" - - description: The number of db row operations. - name: postgresql.operations - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "42" - attributes: - - key: operation - value: - stringValue: del - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "43" - attributes: - - key: operation - value: - stringValue: hot_upd - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "40" - attributes: - - key: operation - value: - stringValue: ins - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "41" - attributes: - - key: operation - value: - stringValue: upd - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - isMonotonic: true - unit: "1" - - description: The number of rows in the database. - name: postgresql.rows - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "9" - attributes: - - key: state - value: - stringValue: dead - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "8" - attributes: - - key: state - value: - stringValue: live - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - unit: "1" - - description: Disk space used by a table. - name: postgresql.table.size - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "44" - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - unit: By - - description: Number of times a table has manually been vacuumed. - name: postgresql.table.vacuum.count - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "45" - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - isMonotonic: true - unit: '{vacuums}' - scope: - name: otelcol/postgresqlreceiver - version: latest - - resource: - attributes: - - key: postgresql.database.name - value: - stringValue: open - - key: postgresql.table.name - value: - stringValue: public.table2 - scopeMetrics: - - metrics: - - description: The number of blocks read. 
- name: postgresql.blocks_read - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "29" - attributes: - - key: source - value: - stringValue: heap_hit - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "28" - attributes: - - key: source - value: - stringValue: heap_read - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "31" - attributes: - - key: source - value: - stringValue: idx_hit - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "30" - attributes: - - key: source - value: - stringValue: idx_read - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "35" - attributes: - - key: source - value: - stringValue: tidx_hit - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "34" - attributes: - - key: source - value: - stringValue: tidx_read - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "33" - attributes: - - key: source - value: - stringValue: toast_hit - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "32" - attributes: - - key: source - value: - stringValue: toast_read - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - isMonotonic: true - unit: "1" - - description: The number of db row operations. - name: postgresql.operations - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "46" - attributes: - - key: operation - value: - stringValue: del - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "47" - attributes: - - key: operation - value: - stringValue: hot_upd - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "44" - attributes: - - key: operation - value: - stringValue: ins - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "45" - attributes: - - key: operation - value: - stringValue: upd - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - isMonotonic: true - unit: "1" - - description: The number of rows in the database. - name: postgresql.rows - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "11" - attributes: - - key: state - value: - stringValue: dead - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "10" - attributes: - - key: state - value: - stringValue: live - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - unit: "1" - - description: Disk space used by a table. - name: postgresql.table.size - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "48" - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - unit: By - - description: Number of times a table has manually been vacuumed. - name: postgresql.table.vacuum.count - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "49" - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - isMonotonic: true - unit: '{vacuums}' - scope: - name: otelcol/postgresqlreceiver - version: latest - - resource: - attributes: - - key: postgresql.database.name - value: - stringValue: otel - - key: postgresql.table.name - value: - stringValue: public.table1 - scopeMetrics: - - metrics: - - description: The number of blocks read. 
- name: postgresql.blocks_read - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "20" - attributes: - - key: source - value: - stringValue: heap_hit - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "19" - attributes: - - key: source - value: - stringValue: heap_read - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "22" - attributes: - - key: source - value: - stringValue: idx_hit - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "21" - attributes: - - key: source - value: - stringValue: idx_read - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "26" - attributes: - - key: source - value: - stringValue: tidx_hit - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "25" - attributes: - - key: source - value: - stringValue: tidx_read - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "24" - attributes: - - key: source - value: - stringValue: toast_hit - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "23" - attributes: - - key: source - value: - stringValue: toast_read - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - isMonotonic: true - unit: "1" - - description: The number of db row operations. - name: postgresql.operations - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "41" - attributes: - - key: operation - value: - stringValue: del - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "42" - attributes: - - key: operation - value: - stringValue: hot_upd - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "39" - attributes: - - key: operation - value: - stringValue: ins - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "40" - attributes: - - key: operation - value: - stringValue: upd - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - isMonotonic: true - unit: "1" - - description: The number of rows in the database. - name: postgresql.rows - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "8" - attributes: - - key: state - value: - stringValue: dead - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - - asInt: "7" - attributes: - - key: state - value: - stringValue: live - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - unit: "1" - - description: Disk space used by a table. - name: postgresql.table.size - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "43" - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - unit: By - - description: Number of times a table has manually been vacuumed. - name: postgresql.table.vacuum.count - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "44" - startTimeUnixNano: "1687780221131489000" - timeUnixNano: "1687780221131628000" - isMonotonic: true - unit: '{vacuums}' - scope: - name: otelcol/postgresqlreceiver - version: latest - - resource: - attributes: - - key: postgresql.database.name - value: - stringValue: otel - - key: postgresql.table.name - value: - stringValue: public.table2 - scopeMetrics: - - metrics: - - description: The number of blocks read. 
-            name: postgresql.blocks_read
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "28"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: heap_hit
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-                - asInt: "27"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: heap_read
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-                - asInt: "30"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: idx_hit
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-                - asInt: "29"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: idx_read
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-                - asInt: "34"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: tidx_hit
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-                - asInt: "33"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: tidx_read
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-                - asInt: "32"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: toast_hit
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-                - asInt: "31"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: toast_read
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-              isMonotonic: true
-            unit: "1"
-          - description: The number of db row operations.
-            name: postgresql.operations
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "45"
-                  attributes:
-                    - key: operation
-                      value:
-                        stringValue: del
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-                - asInt: "46"
-                  attributes:
-                    - key: operation
-                      value:
-                        stringValue: hot_upd
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-                - asInt: "43"
-                  attributes:
-                    - key: operation
-                      value:
-                        stringValue: ins
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-                - asInt: "44"
-                  attributes:
-                    - key: operation
-                      value:
-                        stringValue: upd
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-              isMonotonic: true
-            unit: "1"
-          - description: The number of rows in the database.
-            name: postgresql.rows
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "10"
-                  attributes:
-                    - key: state
-                      value:
-                        stringValue: dead
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-                - asInt: "9"
-                  attributes:
-                    - key: state
-                      value:
-                        stringValue: live
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-            unit: "1"
-          - description: Disk space used by a table.
-            name: postgresql.table.size
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "47"
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-            unit: By
-          - description: Number of times a table has manually been vacuumed.
-            name: postgresql.table.vacuum.count
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "48"
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-              isMonotonic: true
-            unit: '{vacuums}'
-        scope:
-          name: otelcol/postgresqlreceiver
-          version: latest
-  - resource:
-      attributes:
-        - key: postgresql.database.name
-          value:
-            stringValue: telemetry
-        - key: postgresql.table.name
-          value:
-            stringValue: public.table1
-    scopeMetrics:
-      - metrics:
-          - description: The number of blocks read.
-            name: postgresql.blocks_read
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "22"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: heap_hit
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-                - asInt: "21"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: heap_read
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-                - asInt: "24"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: idx_hit
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-                - asInt: "23"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: idx_read
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-                - asInt: "28"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: tidx_hit
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-                - asInt: "27"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: tidx_read
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-                - asInt: "26"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: toast_hit
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-                - asInt: "25"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: toast_read
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-              isMonotonic: true
-            unit: "1"
-          - description: The number of db row operations.
-            name: postgresql.operations
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "43"
-                  attributes:
-                    - key: operation
-                      value:
-                        stringValue: del
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-                - asInt: "44"
-                  attributes:
-                    - key: operation
-                      value:
-                        stringValue: hot_upd
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-                - asInt: "41"
-                  attributes:
-                    - key: operation
-                      value:
-                        stringValue: ins
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-                - asInt: "42"
-                  attributes:
-                    - key: operation
-                      value:
-                        stringValue: upd
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-              isMonotonic: true
-            unit: "1"
-          - description: The number of rows in the database.
-            name: postgresql.rows
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "10"
-                  attributes:
-                    - key: state
-                      value:
-                        stringValue: dead
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-                - asInt: "9"
-                  attributes:
-                    - key: state
-                      value:
-                        stringValue: live
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-            unit: "1"
-          - description: Disk space used by a table.
-            name: postgresql.table.size
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "45"
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-            unit: By
-          - description: Number of times a table has manually been vacuumed.
-            name: postgresql.table.vacuum.count
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "46"
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-              isMonotonic: true
-            unit: '{vacuums}'
-        scope:
-          name: otelcol/postgresqlreceiver
-          version: latest
-  - resource:
-      attributes:
-        - key: postgresql.database.name
-          value:
-            stringValue: telemetry
-        - key: postgresql.table.name
-          value:
-            stringValue: public.table2
-    scopeMetrics:
-      - metrics:
-          - description: The number of blocks read.
-            name: postgresql.blocks_read
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "30"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: heap_hit
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-                - asInt: "29"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: heap_read
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-                - asInt: "32"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: idx_hit
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-                - asInt: "31"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: idx_read
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-                - asInt: "36"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: tidx_hit
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-                - asInt: "35"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: tidx_read
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-                - asInt: "34"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: toast_hit
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-                - asInt: "33"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: toast_read
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-              isMonotonic: true
-            unit: "1"
-          - description: The number of db row operations.
-            name: postgresql.operations
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "47"
-                  attributes:
-                    - key: operation
-                      value:
-                        stringValue: del
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-                - asInt: "48"
-                  attributes:
-                    - key: operation
-                      value:
-                        stringValue: hot_upd
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-                - asInt: "45"
-                  attributes:
-                    - key: operation
-                      value:
-                        stringValue: ins
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-                - asInt: "46"
-                  attributes:
-                    - key: operation
-                      value:
-                        stringValue: upd
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-              isMonotonic: true
-            unit: "1"
-          - description: The number of rows in the database.
-            name: postgresql.rows
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "12"
-                  attributes:
-                    - key: state
-                      value:
-                        stringValue: dead
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-                - asInt: "11"
-                  attributes:
-                    - key: state
-                      value:
-                        stringValue: live
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-            unit: "1"
-          - description: Disk space used by a table.
-            name: postgresql.table.size
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "49"
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-            unit: By
-          - description: Number of times a table has manually been vacuumed.
-            name: postgresql.table.vacuum.count
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "50"
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-              isMonotonic: true
-            unit: '{vacuums}'
-        scope:
-          name: otelcol/postgresqlreceiver
-          version: latest
-  - resource:
-      attributes:
-        - key: postgresql.database.name
-          value:
-            stringValue: open
-        - key: postgresql.index.name
-          value:
-            stringValue: open_test1_pkey
-        - key: postgresql.table.name
-          value:
-            stringValue: public.table1
-    scopeMetrics:
-      - metrics:
-          - description: The number of index scans on a table.
-            name: postgresql.index.scans
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "36"
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-              isMonotonic: true
-            unit: '{scans}'
-          - description: The size of the index on disk.
-            gauge:
-              dataPoints:
-                - asInt: "37"
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-            name: postgresql.index.size
-            unit: By
-        scope:
-          name: otelcol/postgresqlreceiver
-          version: latest
-  - resource:
-      attributes:
-        - key: postgresql.database.name
-          value:
-            stringValue: open
-        - key: postgresql.index.name
-          value:
-            stringValue: open_test2_pkey
-        - key: postgresql.table.name
-          value:
-            stringValue: public.table2
-    scopeMetrics:
-      - metrics:
-          - description: The number of index scans on a table.
-            name: postgresql.index.scans
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "38"
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-              isMonotonic: true
-            unit: '{scans}'
-          - description: The size of the index on disk.
-            gauge:
-              dataPoints:
-                - asInt: "39"
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-            name: postgresql.index.size
-            unit: By
-        scope:
-          name: otelcol/postgresqlreceiver
-          version: latest
-  - resource:
-      attributes:
-        - key: postgresql.database.name
-          value:
-            stringValue: otel
-        - key: postgresql.index.name
-          value:
-            stringValue: otel_test1_pkey
-        - key: postgresql.table.name
-          value:
-            stringValue: public.table1
-    scopeMetrics:
-      - metrics:
-          - description: The number of index scans on a table.
-            name: postgresql.index.scans
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "35"
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-              isMonotonic: true
-            unit: '{scans}'
-          - description: The size of the index on disk.
-            gauge:
-              dataPoints:
-                - asInt: "36"
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-            name: postgresql.index.size
-            unit: By
-        scope:
-          name: otelcol/postgresqlreceiver
-          version: latest
-  - resource:
-      attributes:
-        - key: postgresql.database.name
-          value:
-            stringValue: otel
-        - key: postgresql.index.name
-          value:
-            stringValue: otel_test2_pkey
-        - key: postgresql.table.name
-          value:
-            stringValue: public.table2
-    scopeMetrics:
-      - metrics:
-          - description: The number of index scans on a table.
-            name: postgresql.index.scans
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "37"
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-              isMonotonic: true
-            unit: '{scans}'
-          - description: The size of the index on disk.
-            gauge:
-              dataPoints:
-                - asInt: "38"
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-            name: postgresql.index.size
-            unit: By
-        scope:
-          name: otelcol/postgresqlreceiver
-          version: latest
-  - resource:
-      attributes:
-        - key: postgresql.database.name
-          value:
-            stringValue: telemetry
-        - key: postgresql.index.name
-          value:
-            stringValue: telemetry_test1_pkey
-        - key: postgresql.table.name
-          value:
-            stringValue: public.table1
-    scopeMetrics:
-      - metrics:
-          - description: The number of index scans on a table.
-            name: postgresql.index.scans
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "37"
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-              isMonotonic: true
-            unit: '{scans}'
-          - description: The size of the index on disk.
-            gauge:
-              dataPoints:
-                - asInt: "38"
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-            name: postgresql.index.size
-            unit: By
-        scope:
-          name: otelcol/postgresqlreceiver
-          version: latest
-  - resource:
-      attributes:
-        - key: postgresql.database.name
-          value:
-            stringValue: telemetry
-        - key: postgresql.index.name
-          value:
-            stringValue: telemetry_test2_pkey
-        - key: postgresql.table.name
-          value:
-            stringValue: public.table2
-    scopeMetrics:
-      - metrics:
-          - description: The number of index scans on a table.
-            name: postgresql.index.scans
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "39"
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-              isMonotonic: true
-            unit: '{scans}'
-          - description: The size of the index on disk.
-            gauge:
-              dataPoints:
-                - asInt: "40"
-                  startTimeUnixNano: "1687780221131489000"
-                  timeUnixNano: "1687780221131628000"
-            name: postgresql.index.size
-            unit: By
-        scope:
-          name: otelcol/postgresqlreceiver
-          version: latest
diff --git a/receiver/postgresqlreceiver/testdata/scraper/otel/expected_with_resource.yaml b/receiver/postgresqlreceiver/testdata/scraper/otel/expected_with_resource.yaml
deleted file mode 100644
index 789f816c425f..000000000000
--- a/receiver/postgresqlreceiver/testdata/scraper/otel/expected_with_resource.yaml
+++ /dev/null
@@ -1,612 +0,0 @@
-resourceMetrics:
-  - resource:
-      attributes: []
-    scopeMetrics:
-      - metrics:
-          - description: Number of buffers allocated.
-            name: postgresql.bgwriter.buffers.allocated
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "10"
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-              isMonotonic: true
-            unit: '{buffers}'
-          - description: Number of buffers written.
-            name: postgresql.bgwriter.buffers.writes
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "7"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: backend
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-                - asInt: "8"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: backend_fsync
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-                - asInt: "5"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: bgwriter
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-                - asInt: "9"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: checkpoints
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-              isMonotonic: true
-            unit: '{buffers}'
-          - description: The number of checkpoints performed.
-            name: postgresql.bgwriter.checkpoint.count
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "1"
-                  attributes:
-                    - key: type
-                      value:
-                        stringValue: requested
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-                - asInt: "2"
-                  attributes:
-                    - key: type
-                      value:
-                        stringValue: scheduled
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-              isMonotonic: true
-            unit: '{checkpoints}'
-          - description: Total time spent writing and syncing files to disk by checkpoints.
-            name: postgresql.bgwriter.duration
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asDouble: 4.23
-                  attributes:
-                    - key: type
-                      value:
-                        stringValue: sync
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-                - asDouble: 3.12
-                  attributes:
-                    - key: type
-                      value:
-                        stringValue: write
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-              isMonotonic: true
-            unit: ms
-          - description: Number of times the background writer stopped a cleaning scan because it had written too many buffers.
-            name: postgresql.bgwriter.maxwritten
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "11"
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-              isMonotonic: true
-            unit: "1"
-          - description: Configured maximum number of client connections allowed
-            gauge:
-              dataPoints:
-                - asInt: "100"
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-            name: postgresql.connection.max
-            unit: '{connections}'
-          - description: Number of user databases.
-            name: postgresql.database.count
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "1"
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-            unit: '{databases}'
-          - description: The amount of data delayed in replication.
-            gauge:
-              dataPoints:
-                - asInt: "1024"
-                  attributes:
-                    - key: replication_client
-                      value:
-                        stringValue: unix
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-            name: postgresql.replication.data_delay
-            unit: By
-          - description: Age of the oldest WAL file.
-            gauge:
-              dataPoints:
-                - asInt: "3600"
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-            name: postgresql.wal.age
-            unit: s
-          - description: Time between flushing recent WAL locally and receiving notification that the standby server has completed an operation with it.
-            gauge:
-              dataPoints:
-                - asInt: "600"
-                  attributes:
-                    - key: operation
-                      value:
-                        stringValue: flush
-                    - key: replication_client
-                      value:
-                        stringValue: unix
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-                - asInt: "700"
-                  attributes:
-                    - key: operation
-                      value:
-                        stringValue: replay
-                    - key: replication_client
-                      value:
-                        stringValue: unix
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-                - asInt: "800"
-                  attributes:
-                    - key: operation
-                      value:
-                        stringValue: write
-                    - key: replication_client
-                      value:
-                        stringValue: unix
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-            name: postgresql.wal.lag
-            unit: s
-        scope:
-          name: otelcol/postgresqlreceiver
-          version: latest
-  - resource:
-      attributes:
-        - key: postgresql.database.name
-          value:
-            stringValue: otel
-    scopeMetrics:
-      - metrics:
-          - description: The number of backends.
-            name: postgresql.backends
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "3"
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-            unit: "1"
-          - description: The number of commits.
-            name: postgresql.commits
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "1"
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-              isMonotonic: true
-            unit: "1"
-          - description: The database disk usage.
-            name: postgresql.db_size
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "4"
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-            unit: By
-          - description: The number of deadlocks.
-            name: postgresql.deadlocks
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "3"
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-              isMonotonic: true
-            unit: "{deadlock}"
-          - description: The number of rollbacks.
-            name: postgresql.rollbacks
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "2"
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-              isMonotonic: true
-            unit: "1"
-          - description: Number of user tables in a database.
-            name: postgresql.table.count
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "2"
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-            unit: '{table}'
-        scope:
-          name: otelcol/postgresqlreceiver
-          version: latest
-  - resource:
-      attributes:
-        - key: postgresql.database.name
-          value:
-            stringValue: otel
-        - key: postgresql.table.name
-          value:
-            stringValue: public.table1
-    scopeMetrics:
-      - metrics:
-          - description: The number of blocks read.
-            name: postgresql.blocks_read
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "20"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: heap_hit
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-                - asInt: "19"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: heap_read
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-                - asInt: "22"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: idx_hit
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-                - asInt: "21"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: idx_read
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-                - asInt: "26"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: tidx_hit
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-                - asInt: "25"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: tidx_read
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-                - asInt: "24"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: toast_hit
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-                - asInt: "23"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: toast_read
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-              isMonotonic: true
-            unit: "1"
-          - description: The number of db row operations.
-            name: postgresql.operations
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "41"
-                  attributes:
-                    - key: operation
-                      value:
-                        stringValue: del
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-                - asInt: "42"
-                  attributes:
-                    - key: operation
-                      value:
-                        stringValue: hot_upd
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-                - asInt: "39"
-                  attributes:
-                    - key: operation
-                      value:
-                        stringValue: ins
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-                - asInt: "40"
-                  attributes:
-                    - key: operation
-                      value:
-                        stringValue: upd
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-              isMonotonic: true
-            unit: "1"
-          - description: The number of rows in the database.
-            name: postgresql.rows
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "8"
-                  attributes:
-                    - key: state
-                      value:
-                        stringValue: dead
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-                - asInt: "7"
-                  attributes:
-                    - key: state
-                      value:
-                        stringValue: live
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-            unit: "1"
-          - description: Disk space used by a table.
-            name: postgresql.table.size
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "43"
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-            unit: By
-          - description: Number of times a table has manually been vacuumed.
-            name: postgresql.table.vacuum.count
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "44"
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-              isMonotonic: true
-            unit: '{vacuums}'
-        scope:
-          name: otelcol/postgresqlreceiver
-          version: latest
-  - resource:
-      attributes:
-        - key: postgresql.database.name
-          value:
-            stringValue: otel
-        - key: postgresql.table.name
-          value:
-            stringValue: public.table2
-    scopeMetrics:
-      - metrics:
-          - description: The number of blocks read.
-            name: postgresql.blocks_read
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "28"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: heap_hit
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-                - asInt: "27"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: heap_read
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-                - asInt: "30"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: idx_hit
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-                - asInt: "29"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: idx_read
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-                - asInt: "34"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: tidx_hit
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-                - asInt: "33"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: tidx_read
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-                - asInt: "32"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: toast_hit
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-                - asInt: "31"
-                  attributes:
-                    - key: source
-                      value:
-                        stringValue: toast_read
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-              isMonotonic: true
-            unit: "1"
-          - description: The number of db row operations.
-            name: postgresql.operations
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "45"
-                  attributes:
-                    - key: operation
-                      value:
-                        stringValue: del
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-                - asInt: "46"
-                  attributes:
-                    - key: operation
-                      value:
-                        stringValue: hot_upd
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-                - asInt: "43"
-                  attributes:
-                    - key: operation
-                      value:
-                        stringValue: ins
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-                - asInt: "44"
-                  attributes:
-                    - key: operation
-                      value:
-                        stringValue: upd
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-              isMonotonic: true
-            unit: "1"
-          - description: The number of rows in the database.
-            name: postgresql.rows
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "10"
-                  attributes:
-                    - key: state
-                      value:
-                        stringValue: dead
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-                - asInt: "9"
-                  attributes:
-                    - key: state
-                      value:
-                        stringValue: live
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-            unit: "1"
-          - description: Disk space used by a table.
-            name: postgresql.table.size
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "47"
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-            unit: By
-          - description: Number of times a table has manually been vacuumed.
-            name: postgresql.table.vacuum.count
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "48"
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-              isMonotonic: true
-            unit: '{vacuums}'
-        scope:
-          name: otelcol/postgresqlreceiver
-          version: latest
-  - resource:
-      attributes:
-        - key: postgresql.database.name
-          value:
-            stringValue: otel
-        - key: postgresql.index.name
-          value:
-            stringValue: otel_test1_pkey
-        - key: postgresql.table.name
-          value:
-            stringValue: public.table1
-    scopeMetrics:
-      - metrics:
-          - description: The number of index scans on a table.
-            name: postgresql.index.scans
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "35"
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-              isMonotonic: true
-            unit: '{scans}'
-          - description: The size of the index on disk.
-            gauge:
-              dataPoints:
-                - asInt: "36"
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-            name: postgresql.index.size
-            unit: By
-        scope:
-          name: otelcol/postgresqlreceiver
-          version: latest
-  - resource:
-      attributes:
-        - key: postgresql.database.name
-          value:
-            stringValue: otel
-        - key: postgresql.index.name
-          value:
-            stringValue: otel_test2_pkey
-        - key: postgresql.table.name
-          value:
-            stringValue: public.table2
-    scopeMetrics:
-      - metrics:
-          - description: The number of index scans on a table.
-            name: postgresql.index.scans
-            sum:
-              aggregationTemporality: 2
-              dataPoints:
-                - asInt: "37"
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-              isMonotonic: true
-            unit: '{scans}'
-          - description: The size of the index on disk.
-            gauge:
-              dataPoints:
-                - asInt: "38"
-                  startTimeUnixNano: "1687780223205928000"
-                  timeUnixNano: "1687780223206028000"
-            name: postgresql.index.size
-            unit: By
-        scope:
-          name: otelcol/postgresqlreceiver
-          version: latest