From 731fb3596d17b7a96c6998d0ff958965365edf25 Mon Sep 17 00:00:00 2001 From: Samantha Wang Date: Tue, 28 Jul 2020 11:49:11 -0700 Subject: [PATCH 001/176] add external plugins info --- CONTRIBUTING.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d68d726dca829..11c30d78538a9 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -14,9 +14,11 @@ 1. Open a new [pull request][]. #### Contributing an External Plugin *(experimental)* -Input plugins written for internal Telegraf can be run as externally-compiled plugins through the [Execd Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/execd) without having to change the plugin code. +Input, output, and processor plugins written for internal Telegraf can be run as externally-compiled plugins through the [Execd Input](plugins/inputs/execd), [Execd Output](/plugins/inputs/execd), and [Execd Processor](plugins/processors/execd) Plugins without having to change the plugin code. -Follow the guidelines of how to integrate your plugin with the [Execd Go Shim](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/execd/shim) to easily compile it as a separate app and run it from the inputs.execd plugin. +Follow the guidelines of how to integrate your plugin with the [Execd Go Shim](/plugins/common/shim) to easily compile it as a separate app and run it with the respective `execd` plugin. + +Check out some guidelines on how to build and set up your external plugins to run with `execd`. #### Security Vulnerability Reporting InfluxData takes security and our users' trust very seriously. 
If you believe you have found a security issue in any of our From 2344a01e739ddb5f849d26a42e6e74acfb298ee6 Mon Sep 17 00:00:00 2001 From: Samantha Wang Date: Tue, 28 Jul 2020 11:54:44 -0700 Subject: [PATCH 002/176] add external plugins in docs --- docs/EXTERNAL_PLUGINS.md | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 docs/EXTERNAL_PLUGINS.md diff --git a/docs/EXTERNAL_PLUGINS.md b/docs/EXTERNAL_PLUGINS.md new file mode 100644 index 0000000000000..ca13e856351e7 --- /dev/null +++ b/docs/EXTERNAL_PLUGINS.md @@ -0,0 +1,31 @@ +### External Plugins + +External plugins are external programs that are built outside of Telegraf that +can run through an `execd` plugin. These external plugins allow for more flexibility +compared to internal Telegraf plugins. + +- External plugins can be written in any language (internal Telegraf plugins can only written in Go) +- External plugins can access to libraries not written in Go +- Utilize licensed software that isn't available to the open source community +- Can include large dependencies that would otherwise bloat Telegraf + +### External Plugin Guidelines +The guidelines of writing external plugins would follow those for our general [input](docs/INPUTS.md), +[output](docs/OUTPUTS.md), [processor](docs/PROCESSORS.md), and [aggregator](docs/AGGREGATOR.md) plugins. +Please reference the documentation on how to create these plugins written in Go. + + +## Execd Go Shim +For Go plugins, there is a [Execd Go Shim](plugins/common/shim) that will make it trivial to extract an internal input, processor, or output plugin from the main Telegraf repo out to a stand-alone repo. 
This shim This allows anyone to build and run it as a separate app using one of the `execd`plugins: +- [inputs.execd](/plugins/inputs/execd) +- [processors.execd](/plugins/processors/execd) +- [outputs.execd](/plugins/outputs/execd) + +Follow the [Steps to externalize a plugin](plugins/common/shim#steps-to-externalize-a-plugin) and [Steps to build and run your plugin](plugins/common/shim#steps-to-build-and-run-your-plugin) to properly with the Execd Go Shim + +#### Step-by-Step guidelines +To-be-added + + + + From 72aa771891c7e09be08295c51294771d5b5509f1 Mon Sep 17 00:00:00 2001 From: Samantha Wang Date: Tue, 28 Jul 2020 12:13:26 -0700 Subject: [PATCH 003/176] edit docs --- CONTRIBUTING.md | 2 +- docs/EXTERNAL_PLUGINS.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 11c30d78538a9..0fcca43068797 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -18,7 +18,7 @@ Input, output, and processor plugins written for internal Telegraf can be run as Follow the guidelines of how to integrate your plugin with the [Execd Go Shim](/plugins/common/shim) to easily compile it as a separate app and run it with the respective `execd` plugin. -Check out some guidelines on how to build and set up your external plugins to run with `execd`. +Check out our [guidelines](docs/EXTERNAL_PLUGINS.md#external-plugin-guidelines) on how to build and set up your external plugins to run with `execd`. #### Security Vulnerability Reporting InfluxData takes security and our users' trust very seriously. If you believe you have found a security issue in any of our diff --git a/docs/EXTERNAL_PLUGINS.md b/docs/EXTERNAL_PLUGINS.md index ca13e856351e7..28ea924bbfc58 100644 --- a/docs/EXTERNAL_PLUGINS.md +++ b/docs/EXTERNAL_PLUGINS.md @@ -15,7 +15,7 @@ The guidelines of writing external plugins would follow those for our general [i Please reference the documentation on how to create these plugins written in Go. 
-## Execd Go Shim +#### Execd Go Shim For Go plugins, there is a [Execd Go Shim](plugins/common/shim) that will make it trivial to extract an internal input, processor, or output plugin from the main Telegraf repo out to a stand-alone repo. This shim This allows anyone to build and run it as a separate app using one of the `execd`plugins: - [inputs.execd](/plugins/inputs/execd) - [processors.execd](/plugins/processors/execd) From be90a96fedf27efca04e6a9c2e9e5c75a1cc72f0 Mon Sep 17 00:00:00 2001 From: Samantha Wang Date: Thu, 15 Oct 2020 12:26:26 -0700 Subject: [PATCH 004/176] add netflow plugin --- plugins/inputs/netflow/README.md | 82 ++++++++++ plugins/inputs/netflow/netflow.go | 216 +++++++++++++++++++++++++ plugins/inputs/netflow/netflow_test.go | 92 +++++++++++ 3 files changed, 390 insertions(+) create mode 100644 plugins/inputs/netflow/README.md create mode 100644 plugins/inputs/netflow/netflow.go create mode 100644 plugins/inputs/netflow/netflow_test.go diff --git a/plugins/inputs/netflow/README.md b/plugins/inputs/netflow/README.md new file mode 100644 index 0000000000000..29ae3275502db --- /dev/null +++ b/plugins/inputs/netflow/README.md @@ -0,0 +1,82 @@ +# SFlow Input Plugin + +The Netflow Input Plugin provides support for acting as an Netflow V9/V10 collector in accordance with the specification from [IETF](https://tools.ietf.org/html/rfc7011). + + +# Configuration +The following configuration options are availabe: + +| Name | Description +|---|---| +| service_address| URL to listen on expressed as UDP (IPv4 or 6) OP address and port number +| | Example: ```service_address = "udp://:2055"``` +| read_buffer_size | Maximum socket buffer size (in bytes when no unit specified). Once the buffer fills up, metrics will start dropping. Defaults to the OS default. +||Example = ```read_buffer_size"64KiB"``` | +| dns_multi_name_processor | An optional regexp and template to use to transform a DNS resolve name. 
Particularily useful when DNS resolves an IP address to more than one name, and they alternative in order when queried. Using this processor command it is possible to tranform the name into something common irrespect of which entry is first - if the names conform to a regular naming schema. Note TOML [escape sequences](https://github.com/toml-lang/toml) may be required. +||Example: ````s/(.*)(?:-net[0-9])/$1```` will strip ```-net``` from the host name thereby converting, as an example, ```hostx-net1``` and ```hostx-net2``` both to ```hostx``` +|dns_fqdn_resolve|Determines whether IP addresses should be resolved to Host names. +||Example: ```dns_fqdn_resolve = true``` +|dns_fqdn_cache_ttl|The time to live for entries in the DNS name cache expressed in seconds. Default is 0 which is infinite +||Example: ```dns_fwdn_cache_ttl = 3600``` + +## Configuration: + +This is a sample configuration for the plugin. + +```toml +[[inputs.netflow]] + ## URL to listen on + # service_address = "udp://:2055" + # service_address = "udp4://:2055" + # service_address = "udp6://:2055" + + ## Maximum socket buffer size (in bytes when no unit specified). + ## For stream sockets, once the buffer fills up, the sender will start backing up. + ## For datagram sockets, once the buffer fills up, metrics will start dropping. + ## Defaults to the OS default. + # read_buffer_size = "64KiB" + + # Whether IP addresses should be resolved to host names + # dns_fqdn_resolve = true + + # How long should resolved IP->Hostnames be cached (in seconds) + # dns_fqdn_cache_ttl = 3600 + + # Optional processing instructions for transforming DNS resolve host names + # dns_multi_name_processor = "s/(.*)(?:-net[0-9])/$1" +``` + +## DNS Name and SNMP Interface name resolution and caching + +Raw Netflow packets, and their sample data, communicate IP addresses which are not very useful to humans. + +The Netflow plugin can be configured to attempt to resolve IP addresses to host names via DNS. 
+ +The resolved names, or in the case of a resolution error the ip/id will be used as 'the' name, are configurably cached for a period of time to avoid continual lookups. + +| Source IP Tag | Resolved Host Tag +|---|---| +|agentAddress|agentHost| +|sourceIPv4Address|sourceIPv4Host| +|destinationIPv4Address|sourceIPv4Host| +|sourceIPv6Address|sourceIPv6Host| +|destinationIPv6Address|destinationIPv6Host| +|exporterIPv4Address|exporterIPv4Host| +|exporterIPv6Address|exporterIPv6Host| + + +### Multipe DNS Name resolution & processing + +In some cases DNS servers may maintain multiple entries for the same IP address in support of load balancing. In this setup the same IP address may be resolved to multiple DNS names, via a single DNS query, and it is likely the order of those DNS names will change over time. + +In order to provide some stability to the names recorded against flow records, it is possible to provide a regular expression and template transformation that should be capable of converting multiple names to a single common name where a mathodical naming scheme has been used. + +Example: ````s/(.*)(?:-net[0-9])/$1```` will strip ```-net``` from the host name thereby converting, as an example, ```hostx-net1``` and ```hostx-net2``` both to ```hostx``` + +# Schema + +The parsing of Netflow packets is handled by the Netflow Parser and the schema is described [here](../../parsers/network_flow/netflow/README.md). + +At a high level, individual Flow Samples within the V10 Flow Packet are translated to individual Metric objects. 
+ + diff --git a/plugins/inputs/netflow/netflow.go b/plugins/inputs/netflow/netflow.go new file mode 100644 index 0000000000000..4a00189436bfb --- /dev/null +++ b/plugins/inputs/netflow/netflow.go @@ -0,0 +1,216 @@ +// Package sflow contains a Telegraf input plugin that listens for SFLow V5 network flow sample monitoring packets, parses them to extract flow +// samples which it turns into Metrics for output +package sflow + +import ( + "fmt" + "io" + "log" + "net" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs/network_flow" + "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/plugins/parsers/network_flow/netflow" +) + +type setReadBufferer interface { + SetReadBuffer(bytes int) error +} + +type packetListener struct { + net.PacketConn + *Listener + network_flow.Resolver +} + +func (psl *packetListener) listen() { + buf := make([]byte, 64*1024) // 64kb - maximum size of IP packet + for { + n, a, err := psl.ReadFrom(buf) + if err != nil { + if !strings.HasSuffix(err.Error(), ": use of closed network connection") { + psl.AddError(err) + } + break + } + psl.process(a, buf[:n]) + } +} + +func (psl *packetListener) process(addr net.Addr, buf []byte) { + fmt.Println("netflow received len(buf)", len(buf)) + metrics, err := psl.Parse(buf) + if err != nil { + psl.AddError(fmt.Errorf("unable to parse incoming packet: %s", err)) + + } + fmt.Println("netflow resulted in len(metrisc), err", len(metrics), err) + + for _, m := range metrics { + if h, _, e := net.SplitHostPort(addr.String()); e == nil { + m.AddTag("agentAddress", h) + } + psl.Resolver.Resolve(m, func(resolvedM telegraf.Metric) { + psl.AddMetric(resolvedM) + }) + } +} + +// Listener configuration structure +type Listener struct { + ServiceAddress string `toml:"service_address"` + ReadBufferSize internal.Size `toml:"read_buffer_size"` 
+ + SNMPCommunity string `toml:"snmp_community"` + SNMPIfaceResolve bool `toml:"snmp_iface_resolve"` + SNMPIfaceCacheTTL int `toml:"snmp_iface_cache_ttl"` + + DNSFQDNResolve bool `toml:"dns_fqdn_resolve"` + DNSFQDNCacheTTL int `toml:"dns_fqdn_cache_ttl"` + DNSMultiNameProcessor string `toml:"dns_multi_name_processor"` + + nameResolver network_flow.Resolver + parsers.Parser + telegraf.Accumulator + io.Closer +} + +// Description answers a description of this input plugin +func (sl *Listener) Description() string { + return "Netflow v9/v10 Protocol Listener" +} + +// SampleConfig answers a sample configuration +func (sl *Listener) SampleConfig() string { + return ` + ## URL to listen on + # service_address = "udp://:2055" + # service_address = "udp4://:2055" + # service_address = "udp6://:2055" + + ## Maximum socket buffer size (in bytes when no unit specified). + ## For stream sockets, once the buffer fills up, the sender will start backing up. + ## For datagram sockets, once the buffer fills up, metrics will start dropping. + ## Defaults to the OS default. 
+ # read_buffer_size = "64KiB" + + # Whether IP addresses should be resolved to host names + # dns_fqdn_resolve = true + + # How long should resolved IP->Hostnames be cached (in seconds) + # dns_fqdn_cache_ttl = 3600 + + # Optional processing instructions for transforming DNS resolve host names + # dns_multi_name_processor = "s/(.*)(?:-net[0-9])/$1" + + # Whether Interface Indexes should be resolved to Interface Names via SNMP + # snmp_iface_resolve = true + + # SNMP Community string to use when resolving Interface Names + # snmp_community = "public" + + # How long should resolved Iface Index->Iface Name be cached (in seconds) + # snmp_iface_cache_ttl = 3600 + ` +} + +// Gather is a NOP for sFlow as it receives, asynchronously, sFlow network packets +func (sl *Listener) Gather(_ telegraf.Accumulator) error { + return nil +} + +// Start starts this sFlow listener listening on the configured network for sFlow packets +func (sl *Listener) Start(acc telegraf.Accumulator) error { + + dnsToResolve := map[string]string{ + "agentAddress": "agentHost", + "sourceIPv4Address": "sourceIPv4Host", + "destinationIPv4Address": "sourceIPv4Host", + "sourceIPv6Address": "sourceIPv6Host", + "destinationIPv6Address": "destinationIPv6Host", + "exporterIPv4Address": "exporterIPv4Host", + "exporterIPv6Address": "exporterIPv6Host", + } + + sl.Accumulator = acc + sl.nameResolver = network_flow.NewAsyncResolver(sl.DNSFQDNResolve, time.Duration(sl.DNSFQDNCacheTTL)*time.Second, sl.DNSMultiNameProcessor, sl.SNMPIfaceResolve, time.Duration(sl.SNMPIfaceCacheTTL)*time.Second, sl.SNMPCommunity, "netflow", dnsToResolve) + sl.nameResolver.Start() + + parser, err := netflow.NewParser("netflow", make(map[string]string)) + if err != nil { + return err + } + sl.Parser = parser + + spl := strings.SplitN(sl.ServiceAddress, "://", 2) + if len(spl) != 2 { + return fmt.Errorf("invalid service address: %s", sl.ServiceAddress) + } + + protocol := spl[0] + addr := spl[1] + + pc, err := newUDPListener(protocol, 
addr) + if err != nil { + return err + } + if sl.ReadBufferSize.Size > 0 { + if srb, ok := pc.(setReadBufferer); ok { + srb.SetReadBuffer(int(sl.ReadBufferSize.Size)) + } else { + log.Printf("W! Unable to set read buffer on a %s socket", protocol) + } + } + + log.Printf("I! [inputs.netflow] Listening on %s://%s", protocol, pc.LocalAddr()) + + psl := &packetListener{ + PacketConn: pc, + Listener: sl, + Resolver: sl.nameResolver, + } + + sl.Closer = psl + go psl.listen() + + return nil +} + +// Stop this Listener +func (sl *Listener) Stop() { + if sl.Closer != nil { + sl.Close() + sl.Closer = nil + } + sl.nameResolver.Stop() +} + +// newListener constructs a new vanilla, unconfigured, listener and returns it +func newListener() *Listener { + p, _ := netflow.NewParser("netflow", make(map[string]string)) + return &Listener{Parser: p} +} + +// newUDPListener answers a net.PacketConn for the expected UDP network and address passed in +func newUDPListener(network string, address string) (net.PacketConn, error) { + switch network { + case "udp", "udp4", "udp6": + addr, err := net.ResolveUDPAddr(network, address) + if err != nil { + return nil, err + } + return net.ListenUDP(network, addr) + default: + return nil, fmt.Errorf("unsupported network type %s", network) + } +} + +// init registers this SFflow input plug in with the Telegraf framework +func init() { + inputs.Add("netflow", func() telegraf.Input { return newListener() }) +} diff --git a/plugins/inputs/netflow/netflow_test.go b/plugins/inputs/netflow/netflow_test.go new file mode 100644 index 0000000000000..40d1bfc6437da --- /dev/null +++ b/plugins/inputs/netflow/netflow_test.go @@ -0,0 +1,92 @@ +package sflow + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "log" + "net" + "os" + "testing" + + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/testutil" + "github.com/influxdata/wlog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// testEmptyLog 
is a helper function to ensure no data is written to log. +// Should be called at the start of the test, and returns a function which should run at the end. +func testEmptyLog(t *testing.T) func() { + buf := bytes.NewBuffer(nil) + log.SetOutput(wlog.NewWriter(buf)) + + level := wlog.WARN + wlog.SetLevel(level) + + return func() { + log.SetOutput(os.Stderr) + + for { + line, err := buf.ReadBytes('\n') + if err != nil { + assert.Equal(t, io.EOF, err) + break + } + assert.Empty(t, string(line), "log not empty") + } + } +} + +func TestNetflowDescription(t *testing.T) { + sl := newListener() + assert.NotEmpty(t, sl.Description()) +} + +func TestNetflowSampleConfig(t *testing.T) { + sl := newListener() + assert.NotEmpty(t, sl.SampleConfig()) +} + +func TestNetflowGather(t *testing.T) { + sl := newListener() + assert.Nil(t, sl.Gather(nil)) +} + +func TestNetflowToMetrics(t *testing.T) { + defer testEmptyLog(t)() + + sl := newListener() + sl.ServiceAddress = "udp://127.0.0.1:0" + sl.ReadBufferSize = internal.Size{Size: 1024} + sl.DNSFQDNResolve = false + + acc := &testutil.Accumulator{} + err := sl.Start(acc) + require.NoError(t, err) + defer sl.Stop() + + client, err := net.Dial("udp", sl.Closer.(net.PacketConn).LocalAddr().String()) + require.NoError(t, err) + + template257And258 := []byte("00090004000071d45dc583690000000000000041000000840101000f00010004000200040004000100050001000600010007000200080004000a0004000b0002000c0004000e0004001000040011000400150004001600040102000f000100040002000400040001000500010006000100070002000a0004000b0002000e000400100004001100040015000400160004001b0010001c00100001001801030004000800010004002a000400290004000001030010000000000000000100000000") + dataAgainst257And258 := []byte("00090004000071d45dc583690000000100000041010100340000004800000001110000e115ac10ec0100000000e115ac10ecff000000000000000000000000000000000000000004") + expected := "[netflow map[agentAddress:127.0.0.1 bgpDestinationAsNumber:0 bgpSourceAsNumber:0 
destinationIPv4Address:172.16.236.255 destinationTransportPort:57621 destinationTransportSvc:57621 egressInterface:0 ingressInterface:0 ipClassOfService:0 protocolIdentifier:17 sourceID:65 sourceIPv4Address:172.16.236.1 sourceTransportPort:57621 sourceTransportSvc:57621 tcpControlBits:0] map[flowEndSysUpTime:0 flowStartSysUpTime:0 octetDeltaCount:72 packetDeltaCount:1]]" + + packetBytes := make([]byte, hex.DecodedLen(len(template257And258))) + _, err = hex.Decode(packetBytes, template257And258) + client.Write(packetBytes) + + packetBytes = make([]byte, hex.DecodedLen(len(dataAgainst257And258))) + _, err = hex.Decode(packetBytes, dataAgainst257And258) + client.Write(packetBytes) + + acc.Wait(1) + acc.Lock() + actual := fmt.Sprintf(("%s"), acc.Metrics) + acc.Unlock() + + assert.Equal(t, expected, actual) +} From 61c31e73af09a411ec4fd6979a2e428ef561ec1a Mon Sep 17 00:00:00 2001 From: Samantha Wang Date: Thu, 15 Oct 2020 12:27:54 -0700 Subject: [PATCH 005/176] Revert "add netflow plugin" This reverts commit be90a96fedf27efca04e6a9c2e9e5c75a1cc72f0. --- plugins/inputs/netflow/README.md | 82 ---------- plugins/inputs/netflow/netflow.go | 216 ------------------------- plugins/inputs/netflow/netflow_test.go | 92 ----------- 3 files changed, 390 deletions(-) delete mode 100644 plugins/inputs/netflow/README.md delete mode 100644 plugins/inputs/netflow/netflow.go delete mode 100644 plugins/inputs/netflow/netflow_test.go diff --git a/plugins/inputs/netflow/README.md b/plugins/inputs/netflow/README.md deleted file mode 100644 index 29ae3275502db..0000000000000 --- a/plugins/inputs/netflow/README.md +++ /dev/null @@ -1,82 +0,0 @@ -# SFlow Input Plugin - -The Netflow Input Plugin provides support for acting as an Netflow V9/V10 collector in accordance with the specification from [IETF](https://tools.ietf.org/html/rfc7011). 
- - -# Configuration -The following configuration options are availabe: - -| Name | Description -|---|---| -| service_address| URL to listen on expressed as UDP (IPv4 or 6) OP address and port number -| | Example: ```service_address = "udp://:2055"``` -| read_buffer_size | Maximum socket buffer size (in bytes when no unit specified). Once the buffer fills up, metrics will start dropping. Defaults to the OS default. -||Example = ```read_buffer_size"64KiB"``` | -| dns_multi_name_processor | An optional regexp and template to use to transform a DNS resolve name. Particularily useful when DNS resolves an IP address to more than one name, and they alternative in order when queried. Using this processor command it is possible to tranform the name into something common irrespect of which entry is first - if the names conform to a regular naming schema. Note TOML [escape sequences](https://github.com/toml-lang/toml) may be required. -||Example: ````s/(.*)(?:-net[0-9])/$1```` will strip ```-net``` from the host name thereby converting, as an example, ```hostx-net1``` and ```hostx-net2``` both to ```hostx``` -|dns_fqdn_resolve|Determines whether IP addresses should be resolved to Host names. -||Example: ```dns_fqdn_resolve = true``` -|dns_fqdn_cache_ttl|The time to live for entries in the DNS name cache expressed in seconds. Default is 0 which is infinite -||Example: ```dns_fwdn_cache_ttl = 3600``` - -## Configuration: - -This is a sample configuration for the plugin. - -```toml -[[inputs.netflow]] - ## URL to listen on - # service_address = "udp://:2055" - # service_address = "udp4://:2055" - # service_address = "udp6://:2055" - - ## Maximum socket buffer size (in bytes when no unit specified). - ## For stream sockets, once the buffer fills up, the sender will start backing up. - ## For datagram sockets, once the buffer fills up, metrics will start dropping. - ## Defaults to the OS default. 
- # read_buffer_size = "64KiB" - - # Whether IP addresses should be resolved to host names - # dns_fqdn_resolve = true - - # How long should resolved IP->Hostnames be cached (in seconds) - # dns_fqdn_cache_ttl = 3600 - - # Optional processing instructions for transforming DNS resolve host names - # dns_multi_name_processor = "s/(.*)(?:-net[0-9])/$1" -``` - -## DNS Name and SNMP Interface name resolution and caching - -Raw Netflow packets, and their sample data, communicate IP addresses which are not very useful to humans. - -The Netflow plugin can be configured to attempt to resolve IP addresses to host names via DNS. - -The resolved names, or in the case of a resolution error the ip/id will be used as 'the' name, are configurably cached for a period of time to avoid continual lookups. - -| Source IP Tag | Resolved Host Tag -|---|---| -|agentAddress|agentHost| -|sourceIPv4Address|sourceIPv4Host| -|destinationIPv4Address|sourceIPv4Host| -|sourceIPv6Address|sourceIPv6Host| -|destinationIPv6Address|destinationIPv6Host| -|exporterIPv4Address|exporterIPv4Host| -|exporterIPv6Address|exporterIPv6Host| - - -### Multipe DNS Name resolution & processing - -In some cases DNS servers may maintain multiple entries for the same IP address in support of load balancing. In this setup the same IP address may be resolved to multiple DNS names, via a single DNS query, and it is likely the order of those DNS names will change over time. - -In order to provide some stability to the names recorded against flow records, it is possible to provide a regular expression and template transformation that should be capable of converting multiple names to a single common name where a mathodical naming scheme has been used. 
- -Example: ````s/(.*)(?:-net[0-9])/$1```` will strip ```-net``` from the host name thereby converting, as an example, ```hostx-net1``` and ```hostx-net2``` both to ```hostx``` - -# Schema - -The parsing of Netflow packets is handled by the Netflow Parser and the schema is described [here](../../parsers/network_flow/netflow/README.md). - -At a high level, individual Flow Samples within the V10 Flow Packet are translated to individual Metric objects. - - diff --git a/plugins/inputs/netflow/netflow.go b/plugins/inputs/netflow/netflow.go deleted file mode 100644 index 4a00189436bfb..0000000000000 --- a/plugins/inputs/netflow/netflow.go +++ /dev/null @@ -1,216 +0,0 @@ -// Package sflow contains a Telegraf input plugin that listens for SFLow V5 network flow sample monitoring packets, parses them to extract flow -// samples which it turns into Metrics for output -package sflow - -import ( - "fmt" - "io" - "log" - "net" - "strings" - "time" - - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/plugins/inputs" - "github.com/influxdata/telegraf/plugins/inputs/network_flow" - "github.com/influxdata/telegraf/plugins/parsers" - "github.com/influxdata/telegraf/plugins/parsers/network_flow/netflow" -) - -type setReadBufferer interface { - SetReadBuffer(bytes int) error -} - -type packetListener struct { - net.PacketConn - *Listener - network_flow.Resolver -} - -func (psl *packetListener) listen() { - buf := make([]byte, 64*1024) // 64kb - maximum size of IP packet - for { - n, a, err := psl.ReadFrom(buf) - if err != nil { - if !strings.HasSuffix(err.Error(), ": use of closed network connection") { - psl.AddError(err) - } - break - } - psl.process(a, buf[:n]) - } -} - -func (psl *packetListener) process(addr net.Addr, buf []byte) { - fmt.Println("netflow received len(buf)", len(buf)) - metrics, err := psl.Parse(buf) - if err != nil { - psl.AddError(fmt.Errorf("unable to parse incoming packet: %s", err)) - - } - 
fmt.Println("netflow resulted in len(metrisc), err", len(metrics), err) - - for _, m := range metrics { - if h, _, e := net.SplitHostPort(addr.String()); e == nil { - m.AddTag("agentAddress", h) - } - psl.Resolver.Resolve(m, func(resolvedM telegraf.Metric) { - psl.AddMetric(resolvedM) - }) - } -} - -// Listener configuration structure -type Listener struct { - ServiceAddress string `toml:"service_address"` - ReadBufferSize internal.Size `toml:"read_buffer_size"` - - SNMPCommunity string `toml:"snmp_community"` - SNMPIfaceResolve bool `toml:"snmp_iface_resolve"` - SNMPIfaceCacheTTL int `toml:"snmp_iface_cache_ttl"` - - DNSFQDNResolve bool `toml:"dns_fqdn_resolve"` - DNSFQDNCacheTTL int `toml:"dns_fqdn_cache_ttl"` - DNSMultiNameProcessor string `toml:"dns_multi_name_processor"` - - nameResolver network_flow.Resolver - parsers.Parser - telegraf.Accumulator - io.Closer -} - -// Description answers a description of this input plugin -func (sl *Listener) Description() string { - return "Netflow v9/v10 Protocol Listener" -} - -// SampleConfig answers a sample configuration -func (sl *Listener) SampleConfig() string { - return ` - ## URL to listen on - # service_address = "udp://:2055" - # service_address = "udp4://:2055" - # service_address = "udp6://:2055" - - ## Maximum socket buffer size (in bytes when no unit specified). - ## For stream sockets, once the buffer fills up, the sender will start backing up. - ## For datagram sockets, once the buffer fills up, metrics will start dropping. - ## Defaults to the OS default. 
- # read_buffer_size = "64KiB" - - # Whether IP addresses should be resolved to host names - # dns_fqdn_resolve = true - - # How long should resolved IP->Hostnames be cached (in seconds) - # dns_fqdn_cache_ttl = 3600 - - # Optional processing instructions for transforming DNS resolve host names - # dns_multi_name_processor = "s/(.*)(?:-net[0-9])/$1" - - # Whether Interface Indexes should be resolved to Interface Names via SNMP - # snmp_iface_resolve = true - - # SNMP Community string to use when resolving Interface Names - # snmp_community = "public" - - # How long should resolved Iface Index->Iface Name be cached (in seconds) - # snmp_iface_cache_ttl = 3600 - ` -} - -// Gather is a NOP for sFlow as it receives, asynchronously, sFlow network packets -func (sl *Listener) Gather(_ telegraf.Accumulator) error { - return nil -} - -// Start starts this sFlow listener listening on the configured network for sFlow packets -func (sl *Listener) Start(acc telegraf.Accumulator) error { - - dnsToResolve := map[string]string{ - "agentAddress": "agentHost", - "sourceIPv4Address": "sourceIPv4Host", - "destinationIPv4Address": "sourceIPv4Host", - "sourceIPv6Address": "sourceIPv6Host", - "destinationIPv6Address": "destinationIPv6Host", - "exporterIPv4Address": "exporterIPv4Host", - "exporterIPv6Address": "exporterIPv6Host", - } - - sl.Accumulator = acc - sl.nameResolver = network_flow.NewAsyncResolver(sl.DNSFQDNResolve, time.Duration(sl.DNSFQDNCacheTTL)*time.Second, sl.DNSMultiNameProcessor, sl.SNMPIfaceResolve, time.Duration(sl.SNMPIfaceCacheTTL)*time.Second, sl.SNMPCommunity, "netflow", dnsToResolve) - sl.nameResolver.Start() - - parser, err := netflow.NewParser("netflow", make(map[string]string)) - if err != nil { - return err - } - sl.Parser = parser - - spl := strings.SplitN(sl.ServiceAddress, "://", 2) - if len(spl) != 2 { - return fmt.Errorf("invalid service address: %s", sl.ServiceAddress) - } - - protocol := spl[0] - addr := spl[1] - - pc, err := newUDPListener(protocol, 
addr) - if err != nil { - return err - } - if sl.ReadBufferSize.Size > 0 { - if srb, ok := pc.(setReadBufferer); ok { - srb.SetReadBuffer(int(sl.ReadBufferSize.Size)) - } else { - log.Printf("W! Unable to set read buffer on a %s socket", protocol) - } - } - - log.Printf("I! [inputs.netflow] Listening on %s://%s", protocol, pc.LocalAddr()) - - psl := &packetListener{ - PacketConn: pc, - Listener: sl, - Resolver: sl.nameResolver, - } - - sl.Closer = psl - go psl.listen() - - return nil -} - -// Stop this Listener -func (sl *Listener) Stop() { - if sl.Closer != nil { - sl.Close() - sl.Closer = nil - } - sl.nameResolver.Stop() -} - -// newListener constructs a new vanilla, unconfigured, listener and returns it -func newListener() *Listener { - p, _ := netflow.NewParser("netflow", make(map[string]string)) - return &Listener{Parser: p} -} - -// newUDPListener answers a net.PacketConn for the expected UDP network and address passed in -func newUDPListener(network string, address string) (net.PacketConn, error) { - switch network { - case "udp", "udp4", "udp6": - addr, err := net.ResolveUDPAddr(network, address) - if err != nil { - return nil, err - } - return net.ListenUDP(network, addr) - default: - return nil, fmt.Errorf("unsupported network type %s", network) - } -} - -// init registers this SFflow input plug in with the Telegraf framework -func init() { - inputs.Add("netflow", func() telegraf.Input { return newListener() }) -} diff --git a/plugins/inputs/netflow/netflow_test.go b/plugins/inputs/netflow/netflow_test.go deleted file mode 100644 index 40d1bfc6437da..0000000000000 --- a/plugins/inputs/netflow/netflow_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package sflow - -import ( - "bytes" - "encoding/hex" - "fmt" - "io" - "log" - "net" - "os" - "testing" - - "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/testutil" - "github.com/influxdata/wlog" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// 
testEmptyLog is a helper function to ensure no data is written to log. -// Should be called at the start of the test, and returns a function which should run at the end. -func testEmptyLog(t *testing.T) func() { - buf := bytes.NewBuffer(nil) - log.SetOutput(wlog.NewWriter(buf)) - - level := wlog.WARN - wlog.SetLevel(level) - - return func() { - log.SetOutput(os.Stderr) - - for { - line, err := buf.ReadBytes('\n') - if err != nil { - assert.Equal(t, io.EOF, err) - break - } - assert.Empty(t, string(line), "log not empty") - } - } -} - -func TestNetflowDescription(t *testing.T) { - sl := newListener() - assert.NotEmpty(t, sl.Description()) -} - -func TestNetflowSampleConfig(t *testing.T) { - sl := newListener() - assert.NotEmpty(t, sl.SampleConfig()) -} - -func TestNetflowGather(t *testing.T) { - sl := newListener() - assert.Nil(t, sl.Gather(nil)) -} - -func TestNetflowToMetrics(t *testing.T) { - defer testEmptyLog(t)() - - sl := newListener() - sl.ServiceAddress = "udp://127.0.0.1:0" - sl.ReadBufferSize = internal.Size{Size: 1024} - sl.DNSFQDNResolve = false - - acc := &testutil.Accumulator{} - err := sl.Start(acc) - require.NoError(t, err) - defer sl.Stop() - - client, err := net.Dial("udp", sl.Closer.(net.PacketConn).LocalAddr().String()) - require.NoError(t, err) - - template257And258 := []byte("00090004000071d45dc583690000000000000041000000840101000f00010004000200040004000100050001000600010007000200080004000a0004000b0002000c0004000e0004001000040011000400150004001600040102000f000100040002000400040001000500010006000100070002000a0004000b0002000e000400100004001100040015000400160004001b0010001c00100001001801030004000800010004002a000400290004000001030010000000000000000100000000") - dataAgainst257And258 := []byte("00090004000071d45dc583690000000100000041010100340000004800000001110000e115ac10ec0100000000e115ac10ecff000000000000000000000000000000000000000004") - expected := "[netflow map[agentAddress:127.0.0.1 bgpDestinationAsNumber:0 bgpSourceAsNumber:0 
destinationIPv4Address:172.16.236.255 destinationTransportPort:57621 destinationTransportSvc:57621 egressInterface:0 ingressInterface:0 ipClassOfService:0 protocolIdentifier:17 sourceID:65 sourceIPv4Address:172.16.236.1 sourceTransportPort:57621 sourceTransportSvc:57621 tcpControlBits:0] map[flowEndSysUpTime:0 flowStartSysUpTime:0 octetDeltaCount:72 packetDeltaCount:1]]" - - packetBytes := make([]byte, hex.DecodedLen(len(template257And258))) - _, err = hex.Decode(packetBytes, template257And258) - client.Write(packetBytes) - - packetBytes = make([]byte, hex.DecodedLen(len(dataAgainst257And258))) - _, err = hex.Decode(packetBytes, dataAgainst257And258) - client.Write(packetBytes) - - acc.Wait(1) - acc.Lock() - actual := fmt.Sprintf(("%s"), acc.Metrics) - acc.Unlock() - - assert.Equal(t, expected, actual) -} From cfd7348e5c13612fe63bad3237e9e254eebc9c9b Mon Sep 17 00:00:00 2001 From: Alexander Krantz Date: Tue, 17 Aug 2021 14:13:43 -0700 Subject: [PATCH 006/176] docs: improve redis commands documentation (#9606) --- plugins/inputs/redis/README.md | 12 ++++++++--- plugins/inputs/redis/redis.go | 38 +++++++++++++++++++++++++--------- 2 files changed, 37 insertions(+), 13 deletions(-) diff --git a/plugins/inputs/redis/README.md b/plugins/inputs/redis/README.md index 4327a28bb98ee..bd89ea75346b2 100644 --- a/plugins/inputs/redis/README.md +++ b/plugins/inputs/redis/README.md @@ -10,15 +10,21 @@ ## e.g. ## tcp://localhost:6379 ## tcp://:password@192.168.99.100 + ## unix:///var/run/redis.sock ## ## If no servers are specified, then localhost is used as the host. ## If no port is specified, 6379 is used servers = ["tcp://localhost:6379"] + ## Optional. 
Specify redis commands to retrieve values # [[inputs.redis.commands]] - # command = ["get", "sample-key"] - # field = "sample-key-value" - # type = "string" + # # The command to run where each argument is a separate element + # command = ["get", "sample-key"] + # # The field to store the result in + # field = "sample-key-value" + # # The type of the result + # # Can be "string", "integer", or "float" + # type = "string" ## specify server password # password = "s#cr@t%" diff --git a/plugins/inputs/redis/redis.go b/plugins/inputs/redis/redis.go index fdc5dcd14cb12..b66d4ea41d36b 100644 --- a/plugins/inputs/redis/redis.go +++ b/plugins/inputs/redis/redis.go @@ -32,8 +32,8 @@ type Redis struct { Log telegraf.Logger - clients []Client - initialized bool + clients []Client + connected bool } type Client interface { @@ -201,9 +201,13 @@ var sampleConfig = ` ## Optional. Specify redis commands to retrieve values # [[inputs.redis.commands]] - # command = ["get", "sample-key"] - # field = "sample-key-value" - # type = "string" + # # The command to run where each argument is a separate element + # command = ["get", "sample-key"] + # # The field to store the result in + # field = "sample-key-value" + # # The type of the result + # # Can be "string", "integer", or "float" + # type = "string" ## specify server password # password = "s#cr@t%" @@ -230,8 +234,18 @@ var Tracking = map[string]string{ "role": "replication_role", } -func (r *Redis) init() error { - if r.initialized { +func (r *Redis) Init() error { + for _, command := range r.Commands { + if command.Type != "string" && command.Type != "integer" && command.Type != "float" { + return fmt.Errorf(`unknown result type: expected one of "string", "integer", "float"; got %q`, command.Type) + } + } + + return nil +} + +func (r *Redis) connect() error { + if r.connected { return nil } @@ -299,15 +313,15 @@ func (r *Redis) init() error { } } - r.initialized = true + r.connected = true return nil } // Reads stats from all 
configured servers accumulates stats. // Returns one of the errors encountered while gather stats (if any). func (r *Redis) Gather(acc telegraf.Accumulator) error { - if !r.initialized { - err := r.init() + if !r.connected { + err := r.connect() if err != nil { return err } @@ -333,6 +347,10 @@ func (r *Redis) gatherCommandValues(client Client, acc telegraf.Accumulator) err for _, command := range r.Commands { val, err := client.Do(command.Type, command.Command...) if err != nil { + if strings.Contains(err.Error(), "unexpected type=") { + return fmt.Errorf("could not get command result: %s", err) + } + return err } From 967e31e3036fa7a1fb9be83eb9249d8bca265ae8 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 17 Aug 2021 16:14:15 -0500 Subject: [PATCH 007/176] fix: wireguard unknown revision when using direct (#9620) --- go.mod | 6 ++++++ go.sum | 4 ++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 3cbedd83680a8..0f6bdcd55c1cc 100644 --- a/go.mod +++ b/go.mod @@ -171,3 +171,9 @@ replace github.com/satori/go.uuid => github.com/gofrs/uuid v3.2.0+incompatible // replaced due to https//github.com/mdlayher/apcupsd/issues/10 replace github.com/mdlayher/apcupsd => github.com/influxdata/apcupsd v0.0.0-20210427145308-694d5caead0e + +//proxy.golang.org has versions of golang.zx2c4.com/wireguard with leading v's, whereas the git repo has tags without leading v's: https://git.zx2c4.com/wireguard-go/refs/tags +//So, fetching this module with version v0.0.20200121 (as done by the transitive dependency +//https://github.com/WireGuard/wgctrl-go/blob/e35592f146e40ce8057113d14aafcc3da231fbac/go.mod#L12 ) was not working when using GOPROXY=direct. +//Replacing with the pseudo-version works around this. 
+replace golang.zx2c4.com/wireguard v0.0.20200121 => golang.zx2c4.com/wireguard v0.0.0-20200121152719-05b03c675090 diff --git a/go.sum b/go.sum index 90806c2041df6..d17f8209df7da 100644 --- a/go.sum +++ b/go.sum @@ -1943,8 +1943,8 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.zx2c4.com/wireguard v0.0.20200121 h1:vcswa5Q6f+sylDfjqyrVNNrjsFUUbPsgAQTBCAg/Qf8= -golang.zx2c4.com/wireguard v0.0.20200121/go.mod h1:P2HsVp8SKwZEufsnezXZA4GRX/T49/HlU7DGuelXsU4= +golang.zx2c4.com/wireguard v0.0.0-20200121152719-05b03c675090 h1:LJ5Rrj8y0yBul+KpB2v9dFhYuHRs1s9caVu4VK6MgMo= +golang.zx2c4.com/wireguard v0.0.0-20200121152719-05b03c675090/go.mod h1:P2HsVp8SKwZEufsnezXZA4GRX/T49/HlU7DGuelXsU4= golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 h1:KTi97NIQGgSMaN0v/oxniJV0MEzfzmrDUOAWxombQVc= golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4/go.mod h1:UdS9frhv65KTfwxME1xE8+rHYoFpbm36gOud1GhBe9c= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= From 41c384a978743edba728bf64134d1d48447d2eb5 Mon Sep 17 00:00:00 2001 From: Nicolai Scheer <5212218+nscheer@users.noreply.github.com> Date: Tue, 17 Aug 2021 23:19:02 +0200 Subject: [PATCH 008/176] feat: add bool datatype for sql output plugin (#9598) Co-authored-by: Nicolai Scheer --- plugins/outputs/sql/README.md | 1 + plugins/outputs/sql/sql.go | 4 ++++ plugins/outputs/sql/sql_test.go | 8 ++++++++ plugins/outputs/sql/testdata/mariadb/expected.sql | 6 ++++-- plugins/outputs/sql/testdata/postgres/expected.sql | 8 +++++--- 5 files changed, 22 insertions(+), 5 deletions(-) diff --git 
a/plugins/outputs/sql/README.md b/plugins/outputs/sql/README.md index 6fb215612ecaf..77b89762a7a87 100644 --- a/plugins/outputs/sql/README.md +++ b/plugins/outputs/sql/README.md @@ -104,6 +104,7 @@ through the convert settings. # timestamp = "TIMESTAMP" # defaultvalue = "TEXT" # unsigned = "UNSIGNED" + # bool = "BOOL" ``` ## Driver-specific information diff --git a/plugins/outputs/sql/sql.go b/plugins/outputs/sql/sql.go index 3e003d3309873..fecaf2f6e7661 100644 --- a/plugins/outputs/sql/sql.go +++ b/plugins/outputs/sql/sql.go @@ -22,6 +22,7 @@ type ConvertStruct struct { Timestamp string Defaultvalue string Unsigned string + Bool string } type SQL struct { @@ -103,6 +104,8 @@ func (p *SQL) deriveDatatype(value interface{}) string { datatype = p.Convert.Real case string: datatype = p.Convert.Text + case bool: + datatype = p.Convert.Bool default: datatype = p.Convert.Defaultvalue p.Log.Errorf("Unknown datatype: '%T' %v", value, value) @@ -272,6 +275,7 @@ func newSQL() *SQL { Timestamp: "TIMESTAMP", Defaultvalue: "TEXT", Unsigned: "UNSIGNED", + Bool: "BOOL", }, } } diff --git a/plugins/outputs/sql/sql_test.go b/plugins/outputs/sql/sql_test.go index c57570442c617..5dad6752d4cfe 100644 --- a/plugins/outputs/sql/sql_test.go +++ b/plugins/outputs/sql/sql_test.go @@ -100,6 +100,14 @@ var ( Key: "int64_two", Value: int64(2345), }, + { + Key: "bool_one", + Value: true, + }, + { + Key: "bool_two", + Value: false, + }, }, ts, ), diff --git a/plugins/outputs/sql/testdata/mariadb/expected.sql b/plugins/outputs/sql/testdata/mariadb/expected.sql index 49a3095db4da2..43e0fa5e545b0 100644 --- a/plugins/outputs/sql/testdata/mariadb/expected.sql +++ b/plugins/outputs/sql/testdata/mariadb/expected.sql @@ -21,10 +21,12 @@ CREATE TABLE `metric_one` ( `tag_one` text DEFAULT NULL, `tag_two` text DEFAULT NULL, `int64_one` int(11) DEFAULT NULL, - `int64_two` int(11) DEFAULT NULL + `int64_two` int(11) DEFAULT NULL, + `bool_one` tinyint(1) DEFAULT NULL, + `bool_two` tinyint(1) DEFAULT NULL ); 
/*!40101 SET character_set_client = @saved_cs_client */; -INSERT INTO `metric_one` VALUES ('2021-05-17 22:04:45','tag1','tag2',1234,2345); +INSERT INTO `metric_one` VALUES ('2021-05-17 22:04:45','tag1','tag2',1234,2345,1,0); /*!40101 SET @saved_cs_client = @@character_set_client */; /*!40101 SET character_set_client = utf8 */; CREATE TABLE `metric_two` ( diff --git a/plugins/outputs/sql/testdata/postgres/expected.sql b/plugins/outputs/sql/testdata/postgres/expected.sql index 8bc2b2fc83018..c1ee733ac12d4 100644 --- a/plugins/outputs/sql/testdata/postgres/expected.sql +++ b/plugins/outputs/sql/testdata/postgres/expected.sql @@ -21,7 +21,9 @@ CREATE TABLE public.metric_one ( tag_one text, tag_two text, int64_one integer, - int64_two integer + int64_two integer, + bool_one boolean, + bool_two boolean ); ALTER TABLE public.metric_one OWNER TO postgres; CREATE TABLE public.metric_two ( @@ -33,8 +35,8 @@ ALTER TABLE public.metric_two OWNER TO postgres; COPY public."metric three" ("timestamp", "tag four", "string two") FROM stdin; 2021-05-17 22:04:45 tag4 string2 \. -COPY public.metric_one ("timestamp", tag_one, tag_two, int64_one, int64_two) FROM stdin; -2021-05-17 22:04:45 tag1 tag2 1234 2345 +COPY public.metric_one ("timestamp", tag_one, tag_two, int64_one, int64_two, bool_one, bool_two) FROM stdin; +2021-05-17 22:04:45 tag1 tag2 1234 2345 t f \. 
COPY public.metric_two ("timestamp", tag_three, string_one) FROM stdin; 2021-05-17 22:04:45 tag3 string1 From 02ccbec348aa5171da7fe237d70f7f6d28782723 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 17 Aug 2021 16:22:14 -0500 Subject: [PATCH 009/176] fix: cookie test (#9608) --- plugins/common/cookie/cookie.go | 5 ++-- plugins/common/cookie/cookie_test.go | 34 +++++++++++++++------------- plugins/common/http/config.go | 3 ++- 3 files changed, 23 insertions(+), 19 deletions(-) diff --git a/plugins/common/cookie/cookie.go b/plugins/common/cookie/cookie.go index 92dab9104dcc5..10213f78d9b37 100644 --- a/plugins/common/cookie/cookie.go +++ b/plugins/common/cookie/cookie.go @@ -9,6 +9,7 @@ import ( "strings" "time" + clockutil "github.com/benbjohnson/clock" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" ) @@ -27,7 +28,7 @@ type CookieAuthConfig struct { client *http.Client } -func (c *CookieAuthConfig) Start(client *http.Client, log telegraf.Logger) (err error) { +func (c *CookieAuthConfig) Start(client *http.Client, log telegraf.Logger, clock clockutil.Clock) (err error) { c.client = client if c.Method == "" { @@ -45,7 +46,7 @@ func (c *CookieAuthConfig) Start(client *http.Client, log telegraf.Logger) (err // continual auth renewal if set if c.Renewal > 0 { - ticker := time.NewTicker(time.Duration(c.Renewal)) + ticker := clock.Ticker(time.Duration(c.Renewal)) go func() { for range ticker.C { if err := c.auth(); err != nil && log != nil { diff --git a/plugins/common/cookie/cookie_test.go b/plugins/common/cookie/cookie_test.go index 0231e10dd2eda..036ca2b5bb5a7 100644 --- a/plugins/common/cookie/cookie_test.go +++ b/plugins/common/cookie/cookie_test.go @@ -9,6 +9,7 @@ import ( "testing" "time" + clockutil "github.com/benbjohnson/clock" "github.com/google/go-cmp/cmp" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/cookie" @@ -121,7 +122,7 @@ func 
TestAuthConfig_Start(t *testing.T) { fields fields args args wantErr error - assert func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer) + assert func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer, mock *clockutil.Mock) }{ { name: "zero renewal does not renew", @@ -129,12 +130,11 @@ func TestAuthConfig_Start(t *testing.T) { renewal: 0, endpoint: authEndpointNoCreds, }, - assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer) { + assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer, mock *clockutil.Mock) { // should have Cookie Authed once srv.checkAuthCount(t, 1) srv.checkResp(t, http.StatusOK) - time.Sleep(renewalCheck) - // should have never Cookie Authed again + mock.Add(renewalCheck) srv.checkAuthCount(t, 1) srv.checkResp(t, http.StatusOK) }, @@ -145,13 +145,13 @@ func TestAuthConfig_Start(t *testing.T) { renewal: renewal, endpoint: authEndpointNoCreds, }, - assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer) { + assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer, mock *clockutil.Mock) { // should have Cookie Authed once srv.checkAuthCount(t, 1) // default method set require.Equal(t, http.MethodPost, c.Method) srv.checkResp(t, http.StatusOK) - time.Sleep(renewalCheck) + mock.Add(renewalCheck) // should have Cookie Authed at least twice more srv.checkAuthCount(t, 3) srv.checkResp(t, http.StatusOK) @@ -168,11 +168,11 @@ func TestAuthConfig_Start(t *testing.T) { renewal: renewal, endpoint: authEndpointWithBasicAuth, }, - assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer) { + assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer, mock *clockutil.Mock) { // should have Cookie Authed once srv.checkAuthCount(t, 1) srv.checkResp(t, http.StatusOK) - time.Sleep(renewalCheck) + mock.Add(renewalCheck) // should have Cookie Authed at least twice more srv.checkAuthCount(t, 3) srv.checkResp(t, http.StatusOK) @@ -190,11 +190,11 @@ func 
TestAuthConfig_Start(t *testing.T) { endpoint: authEndpointWithBasicAuth, }, wantErr: fmt.Errorf("cookie auth renewal received status code: 401 (Unauthorized)"), - assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer) { + assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer, mock *clockutil.Mock) { // should have never Cookie Authed srv.checkAuthCount(t, 0) srv.checkResp(t, http.StatusForbidden) - time.Sleep(renewalCheck) + mock.Add(renewalCheck) // should have still never Cookie Authed srv.checkAuthCount(t, 0) srv.checkResp(t, http.StatusForbidden) @@ -210,11 +210,11 @@ func TestAuthConfig_Start(t *testing.T) { renewal: renewal, endpoint: authEndpointWithBody, }, - assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer) { + assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer, mock *clockutil.Mock) { // should have Cookie Authed once srv.checkAuthCount(t, 1) srv.checkResp(t, http.StatusOK) - time.Sleep(renewalCheck) + mock.Add(renewalCheck) // should have Cookie Authed at least twice more srv.checkAuthCount(t, 3) srv.checkResp(t, http.StatusOK) @@ -231,11 +231,11 @@ func TestAuthConfig_Start(t *testing.T) { endpoint: authEndpointWithBody, }, wantErr: fmt.Errorf("cookie auth renewal received status code: 401 (Unauthorized)"), - assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer) { + assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer, mock *clockutil.Mock) { // should have never Cookie Authed srv.checkAuthCount(t, 0) srv.checkResp(t, http.StatusForbidden) - time.Sleep(renewalCheck) + mock.Add(renewalCheck) // should have still never Cookie Authed srv.checkAuthCount(t, 0) srv.checkResp(t, http.StatusForbidden) @@ -255,15 +255,17 @@ func TestAuthConfig_Start(t *testing.T) { Renewal: config.Duration(tt.args.renewal), } - if err := c.Start(srv.Client(), testutil.Logger{Name: "cookie_auth"}); tt.wantErr != nil { + mock := clockutil.NewMock() + if err := 
c.Start(srv.Client(), testutil.Logger{Name: "cookie_auth"}, mock); tt.wantErr != nil { require.EqualError(t, err, tt.wantErr.Error()) } else { require.NoError(t, err) } if tt.assert != nil { - tt.assert(t, c, srv) + tt.assert(t, c, srv, mock) } + srv.Close() }) } } diff --git a/plugins/common/http/config.go b/plugins/common/http/config.go index 07b486cba294e..bd6ce4fefa308 100644 --- a/plugins/common/http/config.go +++ b/plugins/common/http/config.go @@ -5,6 +5,7 @@ import ( "net/http" "time" + "github.com/benbjohnson/clock" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/cookie" @@ -54,7 +55,7 @@ func (h *HTTPClientConfig) CreateClient(ctx context.Context, log telegraf.Logger client = h.OAuth2Config.CreateOauth2Client(ctx, client) if h.CookieAuthConfig.URL != "" { - if err := h.CookieAuthConfig.Start(client, log); err != nil { + if err := h.CookieAuthConfig.Start(client, log, clock.New()); err != nil { return nil, err } } From fe144e7c990e03bb4f32e8ae2a3eee24919eafd9 Mon Sep 17 00:00:00 2001 From: Grace Wehner Date: Tue, 17 Aug 2021 14:54:55 -0700 Subject: [PATCH 010/176] fix: issues with prometheus kubernetes pod discovery (#9605) --- plugins/inputs/prometheus/kubernetes.go | 50 ++++++++++++++----------- 1 file changed, 29 insertions(+), 21 deletions(-) diff --git a/plugins/inputs/prometheus/kubernetes.go b/plugins/inputs/prometheus/kubernetes.go index e78c64af3fcd4..0e658003a7122 100644 --- a/plugins/inputs/prometheus/kubernetes.go +++ b/plugins/inputs/prometheus/kubernetes.go @@ -111,35 +111,43 @@ func (p *Prometheus) watchPod(ctx context.Context, client *kubernetes.Clientset) LabelSelector: p.KubernetesLabelSelector, FieldSelector: p.KubernetesFieldSelector, }) + defer watcher.Stop() if err != nil { return err } - pod := &corev1.Pod{} - go func() { - for event := range watcher.ResultChan() { - pod = &corev1.Pod{} - // If the pod is not "ready", there will be no ip associated with it. 
- if pod.Annotations["prometheus.io/scrape"] != "true" || - !podReady(pod.Status.ContainerStatuses) { - continue - } - switch event.Type { - case watch.Added: - registerPod(pod, p) - case watch.Modified: - // To avoid multiple actions for each event, unregister on the first event - // in the delete sequence, when the containers are still "ready". - if pod.GetDeletionTimestamp() != nil { - unregisterPod(pod, p) - } else { + for { + select { + case <-ctx.Done(): + return nil + default: + for event := range watcher.ResultChan() { + pod, ok := event.Object.(*corev1.Pod) + if !ok { + return fmt.Errorf("Unexpected object when getting pods") + } + + // If the pod is not "ready", there will be no ip associated with it. + if pod.Annotations["prometheus.io/scrape"] != "true" || + !podReady(pod.Status.ContainerStatuses) { + continue + } + + switch event.Type { + case watch.Added: registerPod(pod, p) + case watch.Modified: + // To avoid multiple actions for each event, unregister on the first event + // in the delete sequence, when the containers are still "ready". 
+ if pod.GetDeletionTimestamp() != nil { + unregisterPod(pod, p) + } else { + registerPod(pod, p) + } } } } - }() - - return nil + } } func (p *Prometheus) cAdvisor(ctx context.Context, bearerToken string) error { From ee5c50988a5a09ad1da5362f89c4de04823bb6f0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 18 Aug 2021 15:20:17 -0600 Subject: [PATCH 011/176] fix: Bump github.com/aws/aws-sdk-go-v2 from 1.3.2 to 1.8.0 (#9636) --- docs/LICENSE_OF_DEPENDENCIES.md | 1 + go.mod | 5 +++-- go.sum | 8 ++++++-- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 1ec09fe87f486..ca0ef3e401bd9 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -38,6 +38,7 @@ following works: - github.com/aws/aws-sdk-go-v2/credentials [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/credentials/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/feature/ec2/imds [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/feature/ec2/imds/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/feature/s3/manager [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/internal/ini [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/config/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/service/ec2 [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/ec2/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/internal/accept-encoding/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/internal/presigned-url/LICENSE.txt) diff --git a/go.mod b/go.mod index 0f6bdcd55c1cc..cf18136373821 100644 --- a/go.mod +++ b/go.mod @@ -29,11 +29,12 
@@ require ( github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 // indirect github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 github.com/aws/aws-sdk-go v1.38.69 - github.com/aws/aws-sdk-go-v2 v1.3.2 + github.com/aws/aws-sdk-go-v2 v1.8.0 github.com/aws/aws-sdk-go-v2/config v1.1.5 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.6 + github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0 // indirect github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0 - github.com/aws/smithy-go v1.3.1 + github.com/aws/smithy-go v1.7.0 github.com/benbjohnson/clock v1.0.3 github.com/bitly/go-hostpool v0.1.0 // indirect github.com/bmatcuk/doublestar/v3 v3.0.0 diff --git a/go.sum b/go.sum index d17f8209df7da..9433c42e6e0ca 100644 --- a/go.sum +++ b/go.sum @@ -228,8 +228,9 @@ github.com/aws/aws-sdk-go v1.38.69 h1:V489lmrdkIQSfF6OAGZZ1Cavcm7eczCm2JcGvX+yHR github.com/aws/aws-sdk-go v1.38.69/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.1.0/go.mod h1:smfAbmpW+tcRVuNUjo3MOArSZmW72t62rkCzc2i0TWM= -github.com/aws/aws-sdk-go-v2 v1.3.2 h1:RQj8l98yKUm0UV2Wd3w/Ms+TXV9Rs1E6Kr5tRRMfyU4= github.com/aws/aws-sdk-go-v2 v1.3.2/go.mod h1:7OaACgj2SX3XGWnrIjGlJM22h6yD6MEWKvm7levnnM8= +github.com/aws/aws-sdk-go-v2 v1.8.0 h1:HcN6yDnHV9S7D69E7To0aUppJhiJNEzQSNcUxc7r3qo= +github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= github.com/aws/aws-sdk-go-v2/config v1.1.5 h1:imDWOGwlIrRpHLallJ9mli2SIQ4egtGKtFUFsuGRIaQ= github.com/aws/aws-sdk-go-v2/config v1.1.5/go.mod h1:P3F1hku7qzC81txjwXnwOM6Ex6ezkU6+/557Teyb64E= github.com/aws/aws-sdk-go-v2/credentials v1.1.5 h1:R9v/eN5cXv5yMLC619xRYl5PgCSuy5SarizmM7+qqSA= @@ -238,6 +239,8 @@ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.6 h1:zoOz5V56jO/rGixsCDnrQtAz github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.6/go.mod h1:0+fWMitrmIpENiY8/1DyhdYPUCAPvd9UNz9mtCsEoLQ= 
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.1.2 h1:Doa5wabOIDA0XZzBX5yCTAPGwDCVZ8Ux0wh29AUDmN4= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.1.2/go.mod h1:Azf567f5wBUfUbwpyJJnLM/geFFIzEulGR30L+nQZOE= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0 h1:xu45foJnwMwBqSkIMKyJP9kbyHi5hdhZ/WiJ7D2sHZ0= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE= github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0 h1:+VnEgB1yp+7KlOsk6FXX/v/fU9uL5oSujIMkKQBBmp8= github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0/go.mod h1:/6514fU/SRcY3+ousB1zjUqiXjruSuti2qcfE70osOc= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.0.4 h1:8yeByqOL6UWBsOOXsHnW93/ukwL66O008tRfxXxnTwA= @@ -254,8 +257,9 @@ github.com/aws/aws-sdk-go-v2/service/sso v1.1.5/go.mod h1:bpGz0tidC4y39sZkQSkpO/ github.com/aws/aws-sdk-go-v2/service/sts v1.2.2 h1:fKw6QSGcFlvZCBPYx3fo4sL0HfTmaT06ZtMHJfQQNQQ= github.com/aws/aws-sdk-go-v2/service/sts v1.2.2/go.mod h1:ssRzzJ2RZOVuKj2Vx1YE7ypfil/BIlgmQnCSW4DistU= github.com/aws/smithy-go v1.0.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw= -github.com/aws/smithy-go v1.3.1 h1:xJFO4pK0y9J8fCl34uGsSJX5KNnGbdARDlA5BPhXnwE= github.com/aws/smithy-go v1.3.1/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/aws/smithy-go v1.7.0 h1:+cLHMRrDZvQ4wk+KuQ9yH6eEg6KZEJ9RI2IkDqnygCg= +github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= From 65a7fadaa92b28256517ca4b4d64389f1eac2180 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 18 Aug 2021 15:22:45 -0600 Subject: [PATCH 012/176] fix: Bump github.com/golang/snappy from 
0.0.3 to 0.0.4 (#9637) --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index cf18136373821..7e2528392d227 100644 --- a/go.mod +++ b/go.mod @@ -64,7 +64,7 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/golang/geo v0.0.0-20190916061304-5b978397cfec github.com/golang/protobuf v1.5.2 - github.com/golang/snappy v0.0.3 + github.com/golang/snappy v0.0.4 github.com/google/go-cmp v0.5.6 github.com/google/go-github/v32 v32.1.0 github.com/gopcua/opcua v0.1.13 diff --git a/go.sum b/go.sum index 9433c42e6e0ca..484da4129fb51 100644 --- a/go.sum +++ b/go.sum @@ -729,8 +729,9 @@ github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/addlicense v0.0.0-20190510175307-22550fa7c1b0/go.mod h1:QtPG26W17m+OIQgE6gQ24gC1M6pUaMBAbFrTIDtwG/E= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= From 229b46eb682981375504464fc817a9b44aa28d46 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 18 Aug 2021 15:23:49 -0600 Subject: [PATCH 013/176] fix: Bump github.com/testcontainers/testcontainers-go from 0.11.0 to 0.11.1 (#9638) --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 
files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 7e2528392d227..b48f4850a1d84 100644 --- a/go.mod +++ b/go.mod @@ -47,7 +47,7 @@ require ( github.com/denisenkom/go-mssqldb v0.10.0 github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 github.com/dimchansky/utfbom v1.1.1 - github.com/docker/docker v20.10.6+incompatible + github.com/docker/docker v20.10.7+incompatible github.com/doclambda/protobufquery v0.0.0-20210317203640-88ffabe06a60 github.com/dynatrace-oss/dynatrace-metric-utils-go v0.2.0 github.com/eclipse/paho.mqtt.golang v1.3.0 @@ -124,7 +124,7 @@ require ( github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 github.com/stretchr/testify v1.7.0 github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 - github.com/testcontainers/testcontainers-go v0.11.0 + github.com/testcontainers/testcontainers-go v0.11.1 github.com/tidwall/gjson v1.8.0 github.com/tinylib/msgp v1.1.5 github.com/tklauser/go-sysconf v0.3.5 // indirect diff --git a/go.sum b/go.sum index 484da4129fb51..91bd038463435 100644 --- a/go.sum +++ b/go.sum @@ -461,8 +461,8 @@ github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.6+incompatible h1:oXI3Vas8TI8Eu/EjH4srKHJBVqraSzJybhxY7Om9faQ= -github.com/docker/docker v20.10.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.7+incompatible h1:Z6O9Nhsjv+ayUEeI1IojKbYcsGdgYSNqxe1s2MYzUhQ= +github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 
h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= @@ -1468,8 +1468,8 @@ github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 h1:Oj2e7Sae4XrOs github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod h1:qUzPVlSj2UgxJkVbH0ZwuuiR46U8RBMDT5KLY78Ifpw= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= -github.com/testcontainers/testcontainers-go v0.11.0 h1:HO5YOx2DYBHqcg4MzVWPj3FuHAv7USWVu94vCSsgiaM= -github.com/testcontainers/testcontainers-go v0.11.0/go.mod h1:HztBCODzuA+YpMXGK8amjO8j50jz2gcT0BOzSKUiYIs= +github.com/testcontainers/testcontainers-go v0.11.1 h1:FiYsB83LSGbiawoV8TpAZGfcCUbtaeeg1SXqEKUxh08= +github.com/testcontainers/testcontainers-go v0.11.1/go.mod h1:/V0UVq+1e7NWYoqTPog179clf0Qp9TOyp4EcXaEFQz8= github.com/tidwall/gjson v1.8.0 h1:Qt+orfosKn0rbNTZqHYDqBrmm3UDA4KRkv70fDzG+PQ= github.com/tidwall/gjson v1.8.0/go.mod h1:5/xDoumyyDNerp2U36lyolv46b3uF/9Bu6OfyQ9GImk= github.com/tidwall/match v1.0.3 h1:FQUVvBImDutD8wJLN6c5eMzWtjgONK9MwIBCOrUJKeE= From 47bf30cb1849af52d20d1ce08a3451e81c21bb0d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 18 Aug 2021 15:24:30 -0600 Subject: [PATCH 014/176] fix: Bump github.com/sirupsen/logrus from 1.7.0 to 1.8.1 (#9639) --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index b48f4850a1d84..1267d6e431f4b 100644 --- a/go.mod +++ b/go.mod @@ -118,7 +118,7 @@ require ( github.com/shirou/gopsutil v3.21.6-0.20210624221800-cb512c850043+incompatible github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 // indirect 
github.com/signalfx/golib/v3 v3.3.34 - github.com/sirupsen/logrus v1.7.0 + github.com/sirupsen/logrus v1.8.1 github.com/sleepinggenius2/gosmi v0.4.3 github.com/snowflakedb/gosnowflake v1.5.0 github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 diff --git a/go.sum b/go.sum index 91bd038463435..c638e769fc74b 100644 --- a/go.sum +++ b/go.sum @@ -1415,8 +1415,9 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sleepinggenius2/gosmi v0.4.3 h1:99Zwzy1Cvgsh396sw07oR2G4ab88ILGZFMxSlGWnR6o= github.com/sleepinggenius2/gosmi v0.4.3/go.mod h1:l8OniPmd3bJzw0MXP2/qh7AhP/e+bTY2CNivIhsnDT0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= From c8a9aa225962a327a26b1ef1c9cc65036fa4707c Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Wed, 18 Aug 2021 18:27:27 -0600 Subject: [PATCH 015/176] Update changelog (cherry picked from commit 25b04d4720926c47eef54e61ce79951fc8b34d49) --- CHANGELOG.md | 23 +++++++++++++++++++++++ etc/telegraf.conf | 12 +++++++++--- 2 files changed, 32 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f7ecd8d59ed24..053e9ee59bbf7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,26 @@ +## v1.19.3 [2021-08-18] + +#### Bugfixes + + - [#9639](https://github.com/influxdata/telegraf/pull/9639) Update 
sirupsen/logrus module from 1.7.0 to 1.8.1 + - [#9638](https://github.com/influxdata/telegraf/pull/9638) Update testcontainers/testcontainers-go module from 0.11.0 to 0.11.1 + - [#9637](https://github.com/influxdata/telegraf/pull/9637) Update golang/snappy module from 0.0.3 to 0.0.4 + - [#9636](https://github.com/influxdata/telegraf/pull/9636) Update aws/aws-sdk-go-v2 module from 1.3.2 to 1.8.0 + - [#9605](https://github.com/influxdata/telegraf/pull/9605) `inputs.prometheus` Fix prometheus kubernetes pod discovery + - [#9606](https://github.com/influxdata/telegraf/pull/9606) `inputs.redis` Improve redis commands documentation + - [#9566](https://github.com/influxdata/telegraf/pull/9566) `outputs.cratedb` Replace dots in tag keys with underscores + - [#9401](https://github.com/influxdata/telegraf/pull/9401) `inputs.clickhouse` Fix panic, improve handling empty result set + - [#9583](https://github.com/influxdata/telegraf/pull/9583) `inputs.opcua` Avoid closing session on a closed connection + - [#9576](https://github.com/influxdata/telegraf/pull/9576) `processors.aws` Refactor ec2 init for config-api + - [#9571](https://github.com/influxdata/telegraf/pull/9571) `outputs.loki` Sort logs by timestamp before writing to Loki + - [#9524](https://github.com/influxdata/telegraf/pull/9524) `inputs.opcua` Fix reconnection regression introduced in 1.19.1 + - [#9581](https://github.com/influxdata/telegraf/pull/9581) `inputs.kube_inventory` Fix k8s nodes and pods parsing error + - [#9577](https://github.com/influxdata/telegraf/pull/9577) Update sensu/go module to v2.9.0 + - [#9554](https://github.com/influxdata/telegraf/pull/9554) `inputs.postgresql` Normalize unix socket path + - [#9565](https://github.com/influxdata/telegraf/pull/9565) Update hashicorp/consul/api module to 1.9.1 + - [#9552](https://github.com/influxdata/telegraf/pull/9552) `inputs.vsphere` Update vmware/govmomi module to v0.26.0 in order to support vSphere 7.0 + - 
[#9550](https://github.com/influxdata/telegraf/pull/9550) `inputs.opcua` Do not skip good quality nodes after a bad quality node is encountered + ## v1.19.2 [2021-07-28] #### Release Notes diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 6c3c0e98b36bb..c49761c947bc4 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -506,6 +506,8 @@ # table = "metrics" # # If true, and the metrics table does not exist, create it automatically. # table_create = true +# # The character(s) to replace any '.' in an object key with +# key_separator = "_" # # Configuration for DataDog API to send metrics to. @@ -5741,9 +5743,13 @@ # # ## Optional. Specify redis commands to retrieve values # # [[inputs.redis.commands]] -# # command = ["get", "sample-key"] -# # field = "sample-key-value" -# # type = "string" +# # # The command to run where each argument is a separate element +# # command = ["get", "sample-key"] +# # # The field to store the result in +# # field = "sample-key-value" +# # # The type of the result +# # # Can be "string", "integer", or "float" +# # type = "string" # # ## specify server password # # password = "s#cr@t%" From 3d7d5f2b360baa1bb5848eddfea2fffa24096c84 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Thu, 19 Aug 2021 15:30:37 -0500 Subject: [PATCH 016/176] fix: prefix dependabot commits with "fix:" (#9641) --- .github/dependabot.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index c1de7d8fd2824..2068f1f06444d 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -7,3 +7,5 @@ updates: ignore: # Dependabot isn't able to update this packages that do not match the source, so anything with a version - dependency-name: "*.v*" + commit-message: + prefix: "fix:" From 34565a303db841c359c5a20fd5909ea58837fa1c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 19 Aug 2021 15:31:04 
-0500 Subject: [PATCH 017/176] fix: Bump github.com/aws/aws-sdk-go-v2/config from 1.1.5 to 1.6.0 --- go.mod | 5 ++--- go.sum | 18 ++++++++++++------ 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 1267d6e431f4b..de630f9a84fd2 100644 --- a/go.mod +++ b/go.mod @@ -30,9 +30,8 @@ require ( github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 github.com/aws/aws-sdk-go v1.38.69 github.com/aws/aws-sdk-go-v2 v1.8.0 - github.com/aws/aws-sdk-go-v2/config v1.1.5 - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.6 - github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0 // indirect + github.com/aws/aws-sdk-go-v2/config v1.6.0 + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0 github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0 github.com/aws/smithy-go v1.7.0 github.com/benbjohnson/clock v1.0.3 diff --git a/go.sum b/go.sum index c638e769fc74b..1b482f7449834 100644 --- a/go.sum +++ b/go.sum @@ -231,12 +231,15 @@ github.com/aws/aws-sdk-go-v2 v1.1.0/go.mod h1:smfAbmpW+tcRVuNUjo3MOArSZmW72t62rk github.com/aws/aws-sdk-go-v2 v1.3.2/go.mod h1:7OaACgj2SX3XGWnrIjGlJM22h6yD6MEWKvm7levnnM8= github.com/aws/aws-sdk-go-v2 v1.8.0 h1:HcN6yDnHV9S7D69E7To0aUppJhiJNEzQSNcUxc7r3qo= github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= -github.com/aws/aws-sdk-go-v2/config v1.1.5 h1:imDWOGwlIrRpHLallJ9mli2SIQ4egtGKtFUFsuGRIaQ= github.com/aws/aws-sdk-go-v2/config v1.1.5/go.mod h1:P3F1hku7qzC81txjwXnwOM6Ex6ezkU6+/557Teyb64E= -github.com/aws/aws-sdk-go-v2/credentials v1.1.5 h1:R9v/eN5cXv5yMLC619xRYl5PgCSuy5SarizmM7+qqSA= +github.com/aws/aws-sdk-go-v2/config v1.6.0 h1:rtoCnNObhVm7me+v9sA2aY+NtHNZjjWWC3ifXVci+wE= +github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU= github.com/aws/aws-sdk-go-v2/credentials v1.1.5/go.mod h1:Ir1R6tPiR1/2y1hes8yOijFMz54hzSmgcmCDo6F45Qc= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.6 h1:zoOz5V56jO/rGixsCDnrQtAzYRYM2hGA/43U6jVMFbo= 
+github.com/aws/aws-sdk-go-v2/credentials v1.3.2 h1:Uud/fZzm0lqqhE8kvXYJFAJ3PGnagKoUcvHq1hXfBZw= +github.com/aws/aws-sdk-go-v2/credentials v1.3.2/go.mod h1:PACKuTJdt6AlXvEq8rFI4eDmoqDFC5DpVKQbWysaDgM= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.6/go.mod h1:0+fWMitrmIpENiY8/1DyhdYPUCAPvd9UNz9mtCsEoLQ= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0 h1:SGqDJun6tydgsSIFxv9+EYBJVqVUwg2QMJp6PbNq8C8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0/go.mod h1:Mj/U8OpDbcVcoctrYwA2bak8k/HFPdcLzI/vaiXMwuM= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.1.2 h1:Doa5wabOIDA0XZzBX5yCTAPGwDCVZ8Ux0wh29AUDmN4= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.1.2/go.mod h1:Azf567f5wBUfUbwpyJJnLM/geFFIzEulGR30L+nQZOE= github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0 h1:xu45foJnwMwBqSkIMKyJP9kbyHi5hdhZ/WiJ7D2sHZ0= @@ -246,16 +249,19 @@ github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0/go.mod h1:/6514fU/SRcY3+ousB1zjU github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.0.4 h1:8yeByqOL6UWBsOOXsHnW93/ukwL66O008tRfxXxnTwA= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.0.4/go.mod h1:BCfU3Uo2fhKcMZFp9zU5QQGQxqWCOYmZ/27Dju3S/do= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.1/go.mod h1:PISaKWylTYAyruocNk4Lr9miOOJjOcVBd7twCPbydDk= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.6 h1:ldYIsOP4WyjdzW8t6RC/aSieajrlx+3UN3UCZy1KM5Y= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.6/go.mod h1:L0KWr0ASo83PRZu9NaZaDsw3koS6PspKv137DMDZjHo= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2 h1:Xv1rGYgsRRn0xw9JFNnfpBMZam54PrWpC4rJOJ9koA8= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2/go.mod h1:NXmNI41bdEsJMrD0v9rUvbGCB5GwdBEpKvUvIY3vTFg= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.2.2 h1:aU8H58DoYxNo8R1TaSPTofkuxfQNnoqZmWL+G3+k/vA= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.2.2/go.mod h1:nnutjMLuna0s3GVY/MAkpLX03thyNER06gXvnMAPj5g= 
github.com/aws/aws-sdk-go-v2/service/s3 v1.5.0 h1:VbwXUI3L0hyhVmrFxbDxrs6cBX8TNFX0YxCpooMNjvY= github.com/aws/aws-sdk-go-v2/service/s3 v1.5.0/go.mod h1:uwA7gs93Qcss43astPUb1eq4RyceNmYWAQjZFDOAMLo= -github.com/aws/aws-sdk-go-v2/service/sso v1.1.5 h1:B7ec5wE4+3Ldkurmq0C4gfQFtElGTG+/iTpi/YPMzi4= github.com/aws/aws-sdk-go-v2/service/sso v1.1.5/go.mod h1:bpGz0tidC4y39sZkQSkpO/J0tzWCMXHbw6FZ0j1GkWM= -github.com/aws/aws-sdk-go-v2/service/sts v1.2.2 h1:fKw6QSGcFlvZCBPYx3fo4sL0HfTmaT06ZtMHJfQQNQQ= +github.com/aws/aws-sdk-go-v2/service/sso v1.3.2 h1:b+U3WrF9ON3f32FH19geqmiod4uKcMv/q+wosQjjyyM= +github.com/aws/aws-sdk-go-v2/service/sso v1.3.2/go.mod h1:J21I6kF+d/6XHVk7kp/cx9YVD2TMD2TbLwtRGVcinXo= github.com/aws/aws-sdk-go-v2/service/sts v1.2.2/go.mod h1:ssRzzJ2RZOVuKj2Vx1YE7ypfil/BIlgmQnCSW4DistU= +github.com/aws/aws-sdk-go-v2/service/sts v1.6.1 h1:1Pls85C5CFjhE3aH+h85/hyAk89kQNlAWlEQtIkaFyc= +github.com/aws/aws-sdk-go-v2/service/sts v1.6.1/go.mod h1:hLZ/AnkIKHLuPGjEiyghNEdvJ2PP0MgOxcmv9EBJ4xs= github.com/aws/smithy-go v1.0.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw= github.com/aws/smithy-go v1.3.1/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aws/smithy-go v1.7.0 h1:+cLHMRrDZvQ4wk+KuQ9yH6eEg6KZEJ9RI2IkDqnygCg= From 3a7d9b6d9801aedeed62ee7f7d5ec5ce9ad608f0 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Mon, 23 Aug 2021 15:37:44 -0500 Subject: [PATCH 018/176] fix: support 1.17 & 1.16.7 Go versions (#9642) --- .circleci/config.yml | 86 +- Makefile | 14 +- agent/agent_posix.go | 1 + agent/agent_windows.go | 1 + cmd/telegraf/telegraf_posix.go | 1 + cmd/telegraf/telegraf_windows.go | 1 + docs/LICENSE_OF_DEPENDENCIES.md | 1 - go.mod | 181 +- go.sum | 40 +- internal/exec_unix.go | 1 + internal/exec_windows.go | 1 + internal/globpath/globpath_test.go | 1 + internal/goplugin/noplugin.go | 1 + internal/goplugin/plugin.go | 1 + internal/process/process_posix.go | 1 + internal/process/process_test.go | 1 + 
internal/process/process_windows.go | 1 + internal/usage.go | 1 + internal/usage_windows.go | 1 + logger/event_logger.go | 3 +- logger/event_logger_test.go | 3 +- plugins/inputs/bcache/bcache.go | 1 + plugins/inputs/bcache/bcache_test.go | 1 + plugins/inputs/bcache/bcache_windows.go | 1 + plugins/inputs/cgroup/cgroup_linux.go | 1 + plugins/inputs/cgroup/cgroup_notlinux.go | 1 + plugins/inputs/cgroup/cgroup_test.go | 1 + plugins/inputs/conntrack/conntrack.go | 1 + .../inputs/conntrack/conntrack_notlinux.go | 1 + plugins/inputs/conntrack/conntrack_test.go | 1 + plugins/inputs/diskio/diskio_linux_test.go | 1 + plugins/inputs/diskio/diskio_other.go | 1 + plugins/inputs/dmcache/dmcache_linux.go | 1 + plugins/inputs/dmcache/dmcache_linux_test.go | 1 + plugins/inputs/dmcache/dmcache_notlinux.go | 1 + plugins/inputs/dpdk/dpdk.go | 1 + plugins/inputs/dpdk/dpdk_connector.go | 1 + plugins/inputs/dpdk/dpdk_connector_test.go | 1 + plugins/inputs/dpdk/dpdk_notlinux.go | 1 + plugins/inputs/dpdk/dpdk_test.go | 1 + plugins/inputs/dpdk/dpdk_utils.go | 1 + plugins/inputs/dpdk/dpdk_utils_test.go | 1 + plugins/inputs/ethtool/ethtool_linux.go | 1 + plugins/inputs/ethtool/ethtool_notlinux.go | 1 + plugins/inputs/ethtool/ethtool_test.go | 1 + plugins/inputs/exec/exec_test.go | 1 + plugins/inputs/execd/execd_posix.go | 1 + plugins/inputs/execd/execd_windows.go | 1 + plugins/inputs/execd/shim/goshim_posix.go | 1 + plugins/inputs/execd/shim/goshim_windows.go | 1 + plugins/inputs/execd/shim/shim_posix_test.go | 1 + plugins/inputs/file/file_test.go | 1 + plugins/inputs/filecount/filecount_test.go | 1 + .../filecount/filesystem_helpers_test.go | 1 + plugins/inputs/filestat/filestat_test.go | 1 + .../http_response/http_response_test.go | 1 + plugins/inputs/infiniband/infiniband_linux.go | 1 + .../inputs/infiniband/infiniband_notlinux.go | 1 + plugins/inputs/infiniband/infiniband_test.go | 1 + plugins/inputs/intel_powerstat/file.go | 1 + .../inputs/intel_powerstat/intel_powerstat.go | 1 + 
.../intel_powerstat_notlinux.go | 1 + .../intel_powerstat/intel_powerstat_test.go | 1 + plugins/inputs/intel_powerstat/msr.go | 1 + plugins/inputs/intel_powerstat/msr_test.go | 1 + plugins/inputs/intel_powerstat/rapl.go | 1 + plugins/inputs/intel_powerstat/rapl_test.go | 1 + .../inputs/intel_powerstat/unit_converter.go | 1 + plugins/inputs/intel_rdt/intel_rdt.go | 1 + plugins/inputs/intel_rdt/intel_rdt_test.go | 1 + plugins/inputs/intel_rdt/intel_rdt_windows.go | 1 + plugins/inputs/intel_rdt/processes.go | 1 + plugins/inputs/intel_rdt/publisher.go | 1 + plugins/inputs/intel_rdt/publisher_test.go | 1 + plugins/inputs/iptables/iptables.go | 1 + plugins/inputs/iptables/iptables_nocompile.go | 1 + plugins/inputs/iptables/iptables_test.go | 1 + plugins/inputs/ipvs/ipvs.go | 1 + plugins/inputs/ipvs/ipvs_notlinux.go | 1 + plugins/inputs/kernel/kernel.go | 1 + plugins/inputs/kernel/kernel_notlinux.go | 1 + plugins/inputs/kernel/kernel_test.go | 1 + plugins/inputs/kernel_vmstat/kernel_vmstat.go | 1 + .../kernel_vmstat/kernel_vmstat_notlinux.go | 1 + .../kernel_vmstat/kernel_vmstat_test.go | 1 + plugins/inputs/logparser/logparser.go | 1 + plugins/inputs/logparser/logparser_solaris.go | 1 + plugins/inputs/lustre2/lustre2.go | 1 + plugins/inputs/lustre2/lustre2_test.go | 1 + plugins/inputs/lustre2/lustre2_windows.go | 1 + plugins/inputs/mongodb/mongodb_server_test.go | 1 + plugins/inputs/mongodb/mongodb_test.go | 1 + plugins/inputs/nats/nats.go | 1 + plugins/inputs/nats/nats_freebsd.go | 1 + plugins/inputs/nats/nats_test.go | 1 + plugins/inputs/phpfpm/phpfpm_test.go | 1 + plugins/inputs/ping/ping_notwindows.go | 1 + plugins/inputs/ping/ping_test.go | 1 + plugins/inputs/ping/ping_windows.go | 1 + plugins/inputs/ping/ping_windows_test.go | 1 + plugins/inputs/postfix/postfix.go | 1 + plugins/inputs/postfix/postfix_test.go | 1 + plugins/inputs/postfix/postfix_windows.go | 1 + plugins/inputs/postfix/stat_ctim.go | 1 + plugins/inputs/postfix/stat_ctimespec.go | 1 + 
plugins/inputs/postfix/stat_none.go | 1 + .../inputs/processes/processes_notwindows.go | 1 + plugins/inputs/processes/processes_test.go | 1 + plugins/inputs/processes/processes_windows.go | 1 + .../procstat/native_finder_notwindows.go | 1 + .../inputs/procstat/win_service_notwindows.go | 1 + .../inputs/procstat/win_service_windows.go | 1 + plugins/inputs/ras/ras.go | 1 + plugins/inputs/ras/ras_notlinux.go | 1 + plugins/inputs/ras/ras_test.go | 1 + .../inputs/rethinkdb/rethinkdb_server_test.go | 1 + plugins/inputs/rethinkdb/rethinkdb_test.go | 1 + .../riemann_listener/riemann_listener.go | 2 +- plugins/inputs/sensors/sensors.go | 1 + plugins/inputs/sensors/sensors_notlinux.go | 1 + plugins/inputs/sensors/sensors_test.go | 1 + plugins/inputs/snmp/snmp_mocks_generate.go | 1 + plugins/inputs/sql/drivers_sqlite.go | 5 +- plugins/inputs/synproxy/synproxy_linux.go | 1 + plugins/inputs/synproxy/synproxy_notlinux.go | 1 + plugins/inputs/synproxy/synproxy_test.go | 1 + plugins/inputs/sysstat/sysstat.go | 1 + .../inputs/sysstat/sysstat_interval_test.go | 4 +- plugins/inputs/sysstat/sysstat_notlinux.go | 1 + plugins/inputs/sysstat/sysstat_test.go | 1 + .../systemd_units/systemd_units_notlinux.go | 1 + plugins/inputs/tail/tail.go | 1 + plugins/inputs/tail/tail_solaris.go | 1 + plugins/inputs/varnish/varnish.go | 1 + plugins/inputs/varnish/varnish_test.go | 1 + plugins/inputs/varnish/varnish_windows.go | 1 + plugins/inputs/win_eventlog/event.go | 3 +- .../inputs/win_eventlog/syscall_windows.go | 3 +- plugins/inputs/win_eventlog/util.go | 3 +- plugins/inputs/win_eventlog/util_test.go | 3 +- plugins/inputs/win_eventlog/win_eventlog.go | 3 +- .../win_eventlog/win_eventlog_notwindows.go | 1 + .../inputs/win_eventlog/win_eventlog_test.go | 3 +- .../inputs/win_eventlog/zsyscall_windows.go | 3 +- plugins/inputs/win_perf_counters/kernel32.go | 1 + plugins/inputs/win_perf_counters/pdh.go | 1 + plugins/inputs/win_perf_counters/pdh_386.go | 1 + plugins/inputs/win_perf_counters/pdh_amd64.go 
| 1 + .../win_perf_counters/performance_query.go | 1 + .../win_perf_counters/win_perf_counters.go | 1 + .../win_perf_counters_integration_test.go | 1 + .../win_perf_counters_notwindows.go | 1 + .../win_perf_counters_test.go | 1 + plugins/inputs/win_services/win_services.go | 1 + .../win_services_integration_test.go | 1 + .../win_services/win_services_notwindows.go | 1 + .../inputs/win_services/win_services_test.go | 1 + plugins/inputs/wireless/wireless_linux.go | 1 + plugins/inputs/wireless/wireless_notlinux.go | 1 + plugins/inputs/wireless/wireless_test.go | 1 + plugins/inputs/zfs/zfs_freebsd.go | 1 + plugins/inputs/zfs/zfs_freebsd_test.go | 1 + plugins/inputs/zfs/zfs_linux.go | 1 + plugins/inputs/zfs/zfs_linux_test.go | 1 + plugins/inputs/zfs/zfs_other.go | 1 + plugins/outputs/sql/sqlite.go | 5 +- plugins/outputs/sql/sqlite_test.go | 4 +- plugins/parsers/influx/machine.go | 3129 +++++++++-------- plugins/processors/filepath/filepath_test.go | 1 + plugins/processors/port_name/services_path.go | 1 + .../port_name/services_path_notwindows.go | 1 + scripts/alpine.docker | 2 +- scripts/buster.docker | 2 +- scripts/ci-1.16.docker | 2 +- scripts/{ci-1.15.docker => ci-1.17.docker} | 2 +- scripts/mac_installgo.sh | 4 +- 176 files changed, 2167 insertions(+), 1494 deletions(-) rename scripts/{ci-1.15.docker => ci-1.17.docker} (95%) diff --git a/.circleci/config.yml b/.circleci/config.yml index 010c54a0fedfd..3daec86da98b4 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -4,16 +4,16 @@ orbs: aws-cli: circleci/aws-cli@1.4.0 executors: - go-1_15: + go-1_16: working_directory: '/go/src/github.com/influxdata/telegraf' docker: - - image: 'quay.io/influxdb/telegraf-ci:1.15.8' + - image: 'quay.io/influxdb/telegraf-ci:1.16.7' environment: GOFLAGS: -p=8 - go-1_16: + go-1_17: working_directory: '/go/src/github.com/influxdata/telegraf' docker: - - image: 'quay.io/influxdb/telegraf-ci:1.16.6' + - image: 'quay.io/influxdb/telegraf-ci:1.17.0' environment: GOFLAGS: -p=8 
mac: @@ -88,7 +88,7 @@ commands: - 'dist' jobs: deps: - executor: go-1_16 + executor: go-1_17 steps: - checkout - restore_cache: @@ -105,21 +105,21 @@ jobs: root: '/go' paths: - '*' - test-go-1_15: - executor: go-1_15 + test-go-1_16: + executor: go-1_16 steps: - test-go - test-go-1_15-386: - executor: go-1_15 + test-go-1_16-386: + executor: go-1_16 steps: - test-go: goarch: "386" - test-go-1_16: - executor: go-1_16 + test-go-1_17: + executor: go-1_17 steps: - test-go - test-go-1_16-386: - executor: go-1_16 + test-go-1_17-386: + executor: go-1_17 steps: - test-go: goarch: "386" @@ -150,79 +150,79 @@ jobs: steps: - checkout - check-changed-files-or-halt-windows - - run: choco upgrade golang --version=1.16.6 + - run: choco upgrade golang --version=1.17.0 - run: choco install make - run: git config --system core.longpaths true - run: make test-windows windows-package: - executor: go-1_16 + executor: go-1_17 steps: - package-build: type: windows darwin-package: - executor: go-1_16 + executor: go-1_17 steps: - package-build: type: darwin i386-package: - executor: go-1_16 + executor: go-1_17 steps: - package-build: type: i386 ppc641e-package: - executor: go-1_16 + executor: go-1_17 steps: - package-build: type: ppc641e s390x-package: - executor: go-1_16 + executor: go-1_17 steps: - package-build: type: s390x armel-package: - executor: go-1_16 + executor: go-1_17 steps: - package-build: type: armel amd64-package: - executor: go-1_16 + executor: go-1_17 steps: - package-build: type: amd64 arm64-package: - executor: go-1_16 + executor: go-1_17 steps: - package-build: type: arm64 mipsel-package: - executor: go-1_16 + executor: go-1_17 steps: - package-build: type: mipsel mips-package: - executor: go-1_16 + executor: go-1_17 steps: - package-build: type: mips static-package: - executor: go-1_16 + executor: go-1_17 steps: - package-build: type: static armhf-package: - executor: go-1_16 + executor: go-1_17 steps: - package-build: type: armhf release: - executor: go-1_16 + 
executor: go-1_17 steps: - package-build: release: true nightly: - executor: go-1_16 + executor: go-1_17 steps: - package-build: nightly: true @@ -277,7 +277,7 @@ jobs: path: './dist' destination: 'build/dist' test-awaiter: - executor: go-1_16 + executor: go-1_17 steps: - run: command: | @@ -299,25 +299,25 @@ workflows: filters: tags: only: /.*/ - - 'test-go-1_15': + - 'test-go-1_16': requires: - 'deps' filters: tags: only: /.*/ - - 'test-go-1_15-386': + - 'test-go-1_16-386': requires: - 'deps' filters: tags: only: /.*/ - - 'test-go-1_16': + - 'test-go-1_17': requires: - 'deps' filters: tags: only: /.*/ - - 'test-go-1_16-386': + - 'test-go-1_17-386': requires: - 'deps' filters: @@ -333,10 +333,10 @@ workflows: only: /.*/ - 'test-awaiter': requires: - - 'test-go-1_15' - - 'test-go-1_15-386' - 'test-go-1_16' - 'test-go-1_16-386' + - 'test-go-1_17' + - 'test-go-1_17-386' - 'windows-package': requires: - 'test-go-windows' @@ -395,10 +395,10 @@ workflows: requires: - 'test-go-windows' - 'test-go-mac' - - 'test-go-1_15' - - 'test-go-1_15-386' - 'test-go-1_16' - 'test-go-1_16-386' + - 'test-go-1_17' + - 'test-go-1_17-386' filters: tags: only: /.*/ @@ -420,16 +420,16 @@ workflows: nightly: jobs: - 'deps' - - 'test-go-1_15': + - 'test-go-1_16': requires: - 'deps' - - 'test-go-1_15-386': + - 'test-go-1_16-386': requires: - 'deps' - - 'test-go-1_16': + - 'test-go-1_17': requires: - 'deps' - - 'test-go-1_16-386': + - 'test-go-1_17-386': requires: - 'deps' - 'test-go-mac' @@ -438,10 +438,10 @@ workflows: requires: - 'test-go-windows' - 'test-go-mac' - - 'test-go-1_15' - - 'test-go-1_15-386' - 'test-go-1_16' - 'test-go-1_16-386' + - 'test-go-1_17' + - 'test-go-1_17-386' triggers: - schedule: cron: "0 7 * * *" diff --git a/Makefile b/Makefile index 5cf7d2383604f..f0bb01dd2a35e 100644 --- a/Makefile +++ b/Makefile @@ -194,15 +194,15 @@ plugin-%: @echo "Starting dev environment for $${$(@)} input plugin..." 
@docker-compose -f plugins/inputs/$${$(@)}/dev/docker-compose.yml up -.PHONY: ci-1.15 -ci-1.15: - docker build -t quay.io/influxdb/telegraf-ci:1.15.8 - < scripts/ci-1.15.docker - docker push quay.io/influxdb/telegraf-ci:1.15.8 - .PHONY: ci-1.16 ci-1.16: - docker build -t quay.io/influxdb/telegraf-ci:1.16.6 - < scripts/ci-1.16.docker - docker push quay.io/influxdb/telegraf-ci:1.16.6 + docker build -t quay.io/influxdb/telegraf-ci:1.16.7 - < scripts/ci-1.16.docker + docker push quay.io/influxdb/telegraf-ci:1.16.7 + +.PHONY: ci-1.17 +ci-1.17: + docker build -t quay.io/influxdb/telegraf-ci:1.17.0 - < scripts/ci-1.17.docker + docker push quay.io/influxdb/telegraf-ci:1.17.0 .PHONY: install install: $(buildbin) diff --git a/agent/agent_posix.go b/agent/agent_posix.go index 09552cac07026..e43c3a7817a88 100644 --- a/agent/agent_posix.go +++ b/agent/agent_posix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package agent diff --git a/agent/agent_windows.go b/agent/agent_windows.go index 94ed9d006acb2..3196dc70e78e2 100644 --- a/agent/agent_windows.go +++ b/agent/agent_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package agent diff --git a/cmd/telegraf/telegraf_posix.go b/cmd/telegraf/telegraf_posix.go index a2d6b1e4e365c..21ad935b7147e 100644 --- a/cmd/telegraf/telegraf_posix.go +++ b/cmd/telegraf/telegraf_posix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package main diff --git a/cmd/telegraf/telegraf_windows.go b/cmd/telegraf/telegraf_windows.go index 8bd14d64eaa19..38222f2d0871d 100644 --- a/cmd/telegraf/telegraf_windows.go +++ b/cmd/telegraf/telegraf_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package main diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index ca0ef3e401bd9..1ec09fe87f486 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -38,7 +38,6 @@ following works: - github.com/aws/aws-sdk-go-v2/credentials [Apache License 
2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/credentials/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/feature/ec2/imds [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/feature/ec2/imds/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/feature/s3/manager [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/LICENSE.txt) -- github.com/aws/aws-sdk-go-v2/internal/ini [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/config/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/service/ec2 [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/ec2/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/internal/accept-encoding/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/internal/presigned-url/LICENSE.txt) diff --git a/go.mod b/go.mod index de630f9a84fd2..f09d594111d42 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/influxdata/telegraf -go 1.16 +go 1.17 require ( cloud.google.com/go v0.56.0 @@ -8,73 +8,137 @@ require ( cloud.google.com/go/pubsub v1.2.0 code.cloudfoundry.org/clock v1.0.0 // indirect collectd.org v0.5.0 + github.com/Azure/azure-amqp-common-go/v3 v3.0.0 // indirect github.com/Azure/azure-event-hubs-go/v3 v3.2.0 github.com/Azure/azure-kusto-go v0.3.2 + github.com/Azure/azure-pipeline-go v0.2.3 // indirect + github.com/Azure/azure-sdk-for-go v45.1.0+incompatible // indirect + github.com/Azure/azure-storage-blob-go v0.13.0 // indirect github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd + github.com/Azure/go-amqp v0.12.6 // indirect + github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect + github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest v0.11.17 github.com/Azure/go-autorest/autorest/adal 
v0.9.10 github.com/Azure/go-autorest/autorest/azure/auth v0.5.6 + github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 // indirect + github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect + github.com/Azure/go-autorest/autorest/to v0.3.0 // indirect + github.com/Azure/go-autorest/autorest/validation v0.2.0 // indirect + github.com/Azure/go-autorest/logger v0.2.0 // indirect + github.com/Azure/go-autorest/tracing v0.6.0 // indirect github.com/BurntSushi/toml v0.3.1 github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee + github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3 // indirect + github.com/Microsoft/hcsshim v0.8.16 // indirect github.com/Shopify/sarama v1.27.2 github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46 // indirect github.com/aerospike/aerospike-client-go v1.27.0 + github.com/alecthomas/participle v0.4.1 // indirect github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d github.com/aliyun/alibaba-cloud-sdk-go v1.61.1004 github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 github.com/antchfx/jsonquery v1.1.4 github.com/antchfx/xmlquery v1.3.5 github.com/antchfx/xpath v1.1.11 + github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230 // indirect github.com/apache/thrift v0.13.0 github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 // indirect github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 + github.com/armon/go-metrics v0.3.3 // indirect github.com/aws/aws-sdk-go v1.38.69 - github.com/aws/aws-sdk-go-v2 v1.8.0 - github.com/aws/aws-sdk-go-v2/config v1.6.0 - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0 + github.com/aws/aws-sdk-go-v2 v1.3.2 + github.com/aws/aws-sdk-go-v2/config v1.1.5 + github.com/aws/aws-sdk-go-v2/credentials v1.1.5 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.6 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.1.2 // indirect github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0 - github.com/aws/smithy-go v1.7.0 
+ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.0.4 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.6 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.2.2 // indirect + github.com/aws/aws-sdk-go-v2/service/s3 v1.5.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.1.5 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.2.2 // indirect + github.com/aws/smithy-go v1.3.1 github.com/benbjohnson/clock v1.0.3 + github.com/beorn7/perks v1.0.1 // indirect github.com/bitly/go-hostpool v0.1.0 // indirect github.com/bmatcuk/doublestar/v3 v3.0.0 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 github.com/caio/go-tdigest v3.1.0+incompatible + github.com/cenkalti/backoff v2.2.1+incompatible // indirect + github.com/cespare/xxhash/v2 v2.1.1 // indirect github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 + github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68 // indirect + github.com/containerd/containerd v1.5.0-beta.4 // indirect + github.com/coreos/go-semver v0.3.0 // indirect github.com/couchbase/go-couchbase v0.1.0 github.com/couchbase/gomemcached v0.1.3 // indirect github.com/couchbase/goutils v0.1.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect github.com/denisenkom/go-mssqldb v0.10.0 + github.com/devigned/tab v0.1.1 // indirect + github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 github.com/dimchansky/utfbom v1.1.1 - github.com/docker/docker v20.10.7+incompatible + github.com/docker/distribution v2.7.1+incompatible // indirect + github.com/docker/docker v20.10.6+incompatible + github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/go-units v0.4.0 // indirect github.com/doclambda/protobufquery v0.0.0-20210317203640-88ffabe06a60 github.com/dynatrace-oss/dynatrace-metric-utils-go v0.2.0 + github.com/eapache/go-resiliency v1.2.0 // indirect + 
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect + github.com/eapache/queue v1.1.0 // indirect + github.com/echlebek/timeproxy v1.0.0 // indirect github.com/eclipse/paho.mqtt.golang v1.3.0 + github.com/fatih/color v1.9.0 // indirect github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 github.com/go-logfmt/logfmt v0.5.0 + github.com/go-logr/logr v0.4.0 // indirect + github.com/go-ole/go-ole v1.2.4 // indirect github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c github.com/go-redis/redis v6.15.9+incompatible github.com/go-sql-driver/mysql v1.6.0 + github.com/go-stack/stack v1.8.0 // indirect github.com/goburrow/modbus v0.1.0 // indirect github.com/goburrow/serial v0.1.0 // indirect github.com/gobwas/glob v0.2.3 github.com/gofrs/uuid v3.3.0+incompatible + github.com/gogo/googleapis v1.4.0 // indirect github.com/gogo/protobuf v1.3.2 + github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe // indirect github.com/golang/geo v0.0.0-20190916061304-5b978397cfec + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect github.com/golang/protobuf v1.5.2 - github.com/golang/snappy v0.0.4 + github.com/golang/snappy v0.0.3 + github.com/google/flatbuffers v1.11.0 // indirect github.com/google/go-cmp v0.5.6 github.com/google/go-github/v32 v32.1.0 + github.com/google/go-querystring v1.0.0 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/google/uuid v1.2.0 // indirect + github.com/googleapis/gax-go/v2 v2.0.5 // indirect + github.com/googleapis/gnostic v0.4.1 // indirect github.com/gopcua/opcua v0.1.13 github.com/gorilla/mux v1.7.3 github.com/gorilla/websocket v1.4.2 github.com/gosnmp/gosnmp v1.32.0 github.com/grid-x/modbus v0.0.0-20210224155242-c4a3d042e99b + github.com/grid-x/serial v0.0.0-20191104121038-e24bc9bf6f08 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/hailocab/go-hostpool 
v0.0.0-20160125115350-e80d13ce29ed // indirect github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 github.com/hashicorp/consul/api v1.9.1 + github.com/hashicorp/go-cleanhttp v0.5.1 // indirect + github.com/hashicorp/go-hclog v0.12.2 // indirect + github.com/hashicorp/go-immutable-radix v1.2.0 // indirect github.com/hashicorp/go-msgpack v0.5.5 // indirect + github.com/hashicorp/go-rootcerts v1.0.2 // indirect + github.com/hashicorp/go-uuid v1.0.2 // indirect + github.com/hashicorp/golang-lru v0.5.4 // indirect + github.com/hashicorp/serf v0.9.5 // indirect github.com/influxdata/go-syslog/v3 v3.0.0 github.com/influxdata/influxdb-observability/common v0.2.4 github.com/influxdata/influxdb-observability/influx2otel v0.2.4 @@ -82,88 +146,187 @@ require ( github.com/influxdata/tail v1.0.1-0.20210707231403-b283181d1fa7 github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 + github.com/jackc/chunkreader/v2 v2.0.1 // indirect + github.com/jackc/pgconn v1.5.0 // indirect + github.com/jackc/pgio v1.0.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgproto3/v2 v2.0.1 // indirect + github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8 // indirect + github.com/jackc/pgtype v1.3.0 // indirect github.com/jackc/pgx/v4 v4.6.0 + github.com/jaegertracing/jaeger v1.15.1 // indirect github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a + github.com/jcmturner/gofork v1.0.0 // indirect github.com/jhump/protoreflect v1.8.3-0.20210616212123-6cc1efa697ca github.com/jmespath/go-jmespath v0.4.0 + github.com/jpillora/backoff v1.0.0 // indirect + github.com/json-iterator/go v1.1.10 // indirect + github.com/jstemmer/go-junit-report v0.9.1 // indirect github.com/kardianos/service v1.0.0 github.com/karrick/godirwalk v1.16.1 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 github.com/klauspost/compress v1.13.1 // indirect + 
github.com/kr/pretty v0.2.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect + github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 // indirect github.com/lib/pq v1.3.0 // indirect + github.com/mailru/easyjson v0.7.1 // indirect + github.com/mattn/go-colorable v0.1.6 // indirect + github.com/mattn/go-ieproxy v0.0.1 // indirect + github.com/mattn/go-isatty v0.0.12 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 github.com/mdlayher/apcupsd v0.0.0-20200608131503-2bf01da7bf1b + github.com/mdlayher/genetlink v1.0.0 // indirect + github.com/mdlayher/netlink v1.1.0 // indirect github.com/microsoft/ApplicationInsights-Go v0.4.4 github.com/miekg/dns v1.1.31 + github.com/minio/highwayhash v1.0.1 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/mapstructure v1.2.2 // indirect github.com/moby/ipvs v1.0.1 + github.com/moby/sys/mount v0.2.0 // indirect + github.com/moby/sys/mountinfo v0.4.1 // indirect + github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.1 // indirect + github.com/morikuni/aec v1.0.0 // indirect github.com/multiplay/go-ts3 v1.0.0 github.com/naoina/go-stringutil v0.1.0 // indirect + github.com/nats-io/jwt/v2 v2.0.2 // indirect github.com/nats-io/nats-server/v2 v2.2.6 github.com/nats-io/nats.go v1.11.0 + github.com/nats-io/nkeys v0.3.0 // indirect + github.com/nats-io/nuid v1.0.1 // indirect github.com/newrelic/newrelic-telemetry-sdk-go v0.5.1 github.com/nsqio/go-nsq v1.0.8 github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029 + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.0.1 // indirect + github.com/opencontainers/runc v1.0.0-rc93 // indirect + github.com/opentracing-contrib/go-observer 
v0.0.0-20170622124052-a52f23424492 // indirect + github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/openzipkin/zipkin-go-opentracing v0.3.4 + github.com/philhofer/fwd v1.1.1 // indirect + github.com/pierrec/lz4 v2.5.2+incompatible // indirect github.com/pion/dtls/v2 v2.0.9 + github.com/pion/logging v0.2.2 // indirect + github.com/pion/transport v0.12.3 // indirect + github.com/pion/udp v0.1.1 // indirect + github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4 // indirect github.com/pkg/errors v0.9.1 + github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_golang v1.7.1 github.com/prometheus/client_model v0.2.0 github.com/prometheus/common v0.15.0 github.com/prometheus/procfs v0.6.0 github.com/prometheus/prometheus v1.8.2-0.20200911110723-e83ef207b6c2 + github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect github.com/riemann/riemann-go-client v0.5.0 + github.com/robertkrimen/otto v0.0.0-20191219234010-c382bd3c16ff // indirect + github.com/robfig/cron/v3 v3.0.1 // indirect github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 + github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e // indirect + github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect github.com/sensu/sensu-go/api/core/v2 v2.9.0 github.com/shirou/gopsutil v3.21.6-0.20210624221800-cb512c850043+incompatible github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 // indirect + github.com/signalfx/com_signalfx_metrics_protobuf v0.0.2 // indirect + github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083 // indirect github.com/signalfx/golib/v3 v3.3.34 - github.com/sirupsen/logrus v1.8.1 + github.com/signalfx/sapm-proto v0.4.0 // indirect + github.com/sirupsen/logrus v1.7.0 github.com/sleepinggenius2/gosmi v0.4.3 github.com/snowflakedb/gosnowflake v1.5.0 github.com/streadway/amqp 
v0.0.0-20190827072141-edfb9018d271 + github.com/stretchr/objx v0.2.0 // indirect github.com/stretchr/testify v1.7.0 github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 - github.com/testcontainers/testcontainers-go v0.11.1 + github.com/testcontainers/testcontainers-go v0.11.0 github.com/tidwall/gjson v1.8.0 + github.com/tidwall/match v1.0.3 // indirect + github.com/tidwall/pretty v1.1.0 // indirect github.com/tinylib/msgp v1.1.5 github.com/tklauser/go-sysconf v0.3.5 // indirect + github.com/tklauser/numcpus v0.2.2 // indirect github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330 + github.com/vishvananda/netlink v1.1.0 // indirect + github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df // indirect github.com/vjeantet/grok v1.0.1 github.com/vmware/govmomi v0.26.0 github.com/wavefronthq/wavefront-sdk-go v0.9.7 github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a // indirect + github.com/xdg-go/pbkdf2 v1.0.0 // indirect + github.com/xdg-go/scram v1.0.2 // indirect + github.com/xdg-go/stringprep v1.0.2 // indirect github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c + github.com/xdg/stringprep v1.0.0 // indirect github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 // indirect + go.etcd.io/etcd/api/v3 v3.5.0 // indirect go.mongodb.org/mongo-driver v1.5.3 + go.opencensus.io v0.22.3 // indirect go.opentelemetry.io/collector/model v0.0.0-20210723184018-3b7d6ce4830c go.starlark.net v0.0.0-20210406145628-7a1108eaa012 + go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e // indirect + golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect + golang.org/x/mod v0.4.2 // indirect golang.org/x/net v0.0.0-20210610132358-84b48f89b13b golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d golang.org/x/sync 
v0.0.0-20210220032951-036812b2e83c golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 + golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect golang.org/x/text v0.3.6 + golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect golang.org/x/tools v0.1.2 + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + golang.zx2c4.com/wireguard v0.0.20200121 // indirect golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 google.golang.org/api v0.29.0 + google.golang.org/appengine v1.6.6 // indirect google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08 google.golang.org/grpc v1.39.0 google.golang.org/protobuf v1.27.1 + gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect gopkg.in/djherbis/times.v1 v1.2.0 gopkg.in/fatih/pool.v2 v2.0.0 // indirect + gopkg.in/fsnotify.v1 v1.4.7 // indirect gopkg.in/gorethink/gorethink.v3 v3.0.5 + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ini.v1 v1.42.0 // indirect + gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect + gopkg.in/jcmturner/dnsutils.v1 v1.0.1 // indirect + gopkg.in/jcmturner/gokrb5.v7 v7.5.0 // indirect + gopkg.in/jcmturner/rpc.v1 v1.1.0 // indirect gopkg.in/ldap.v3 v3.1.0 gopkg.in/olivere/elastic.v5 v5.0.70 + gopkg.in/sourcemap.v1 v1.0.5 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 + gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 // indirect gopkg.in/yaml.v2 v2.4.0 + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect gotest.tools v2.2.0+incompatible + honnef.co/go/tools v0.0.1-2020.1.4 // indirect k8s.io/api v0.20.4 k8s.io/apimachinery v0.21.1 k8s.io/client-go v0.20.4 + k8s.io/klog/v2 v2.8.0 // indirect + k8s.io/utils v0.0.0-20201110183641-67b214c5f920 // indirect + modernc.org/cc/v3 v3.33.5 // indirect + modernc.org/ccgo/v3 v3.9.4 // indirect + modernc.org/libc v1.9.5 // indirect + modernc.org/mathutil v1.2.2 // indirect + modernc.org/memory v1.0.4 // indirect + modernc.org/opt v0.1.1 // indirect modernc.org/sqlite 
v1.10.8 + modernc.org/strutil v1.1.0 // indirect + modernc.org/token v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.1.0 // indirect + sigs.k8s.io/yaml v1.2.0 // indirect ) // replaced due to https://github.com/satori/go.uuid/issues/73 diff --git a/go.sum b/go.sum index 1b482f7449834..d17f8209df7da 100644 --- a/go.sum +++ b/go.sum @@ -228,44 +228,34 @@ github.com/aws/aws-sdk-go v1.38.69 h1:V489lmrdkIQSfF6OAGZZ1Cavcm7eczCm2JcGvX+yHR github.com/aws/aws-sdk-go v1.38.69/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.1.0/go.mod h1:smfAbmpW+tcRVuNUjo3MOArSZmW72t62rkCzc2i0TWM= +github.com/aws/aws-sdk-go-v2 v1.3.2 h1:RQj8l98yKUm0UV2Wd3w/Ms+TXV9Rs1E6Kr5tRRMfyU4= github.com/aws/aws-sdk-go-v2 v1.3.2/go.mod h1:7OaACgj2SX3XGWnrIjGlJM22h6yD6MEWKvm7levnnM8= -github.com/aws/aws-sdk-go-v2 v1.8.0 h1:HcN6yDnHV9S7D69E7To0aUppJhiJNEzQSNcUxc7r3qo= -github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= +github.com/aws/aws-sdk-go-v2/config v1.1.5 h1:imDWOGwlIrRpHLallJ9mli2SIQ4egtGKtFUFsuGRIaQ= github.com/aws/aws-sdk-go-v2/config v1.1.5/go.mod h1:P3F1hku7qzC81txjwXnwOM6Ex6ezkU6+/557Teyb64E= -github.com/aws/aws-sdk-go-v2/config v1.6.0 h1:rtoCnNObhVm7me+v9sA2aY+NtHNZjjWWC3ifXVci+wE= -github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU= +github.com/aws/aws-sdk-go-v2/credentials v1.1.5 h1:R9v/eN5cXv5yMLC619xRYl5PgCSuy5SarizmM7+qqSA= github.com/aws/aws-sdk-go-v2/credentials v1.1.5/go.mod h1:Ir1R6tPiR1/2y1hes8yOijFMz54hzSmgcmCDo6F45Qc= -github.com/aws/aws-sdk-go-v2/credentials v1.3.2 h1:Uud/fZzm0lqqhE8kvXYJFAJ3PGnagKoUcvHq1hXfBZw= -github.com/aws/aws-sdk-go-v2/credentials v1.3.2/go.mod h1:PACKuTJdt6AlXvEq8rFI4eDmoqDFC5DpVKQbWysaDgM= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.6 h1:zoOz5V56jO/rGixsCDnrQtAzYRYM2hGA/43U6jVMFbo= 
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.6/go.mod h1:0+fWMitrmIpENiY8/1DyhdYPUCAPvd9UNz9mtCsEoLQ= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0 h1:SGqDJun6tydgsSIFxv9+EYBJVqVUwg2QMJp6PbNq8C8= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0/go.mod h1:Mj/U8OpDbcVcoctrYwA2bak8k/HFPdcLzI/vaiXMwuM= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.1.2 h1:Doa5wabOIDA0XZzBX5yCTAPGwDCVZ8Ux0wh29AUDmN4= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.1.2/go.mod h1:Azf567f5wBUfUbwpyJJnLM/geFFIzEulGR30L+nQZOE= -github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0 h1:xu45foJnwMwBqSkIMKyJP9kbyHi5hdhZ/WiJ7D2sHZ0= -github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE= github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0 h1:+VnEgB1yp+7KlOsk6FXX/v/fU9uL5oSujIMkKQBBmp8= github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0/go.mod h1:/6514fU/SRcY3+ousB1zjUqiXjruSuti2qcfE70osOc= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.0.4 h1:8yeByqOL6UWBsOOXsHnW93/ukwL66O008tRfxXxnTwA= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.0.4/go.mod h1:BCfU3Uo2fhKcMZFp9zU5QQGQxqWCOYmZ/27Dju3S/do= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.1/go.mod h1:PISaKWylTYAyruocNk4Lr9miOOJjOcVBd7twCPbydDk= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.6 h1:ldYIsOP4WyjdzW8t6RC/aSieajrlx+3UN3UCZy1KM5Y= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.6/go.mod h1:L0KWr0ASo83PRZu9NaZaDsw3koS6PspKv137DMDZjHo= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2 h1:Xv1rGYgsRRn0xw9JFNnfpBMZam54PrWpC4rJOJ9koA8= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2/go.mod h1:NXmNI41bdEsJMrD0v9rUvbGCB5GwdBEpKvUvIY3vTFg= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.2.2 h1:aU8H58DoYxNo8R1TaSPTofkuxfQNnoqZmWL+G3+k/vA= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.2.2/go.mod 
h1:nnutjMLuna0s3GVY/MAkpLX03thyNER06gXvnMAPj5g= github.com/aws/aws-sdk-go-v2/service/s3 v1.5.0 h1:VbwXUI3L0hyhVmrFxbDxrs6cBX8TNFX0YxCpooMNjvY= github.com/aws/aws-sdk-go-v2/service/s3 v1.5.0/go.mod h1:uwA7gs93Qcss43astPUb1eq4RyceNmYWAQjZFDOAMLo= +github.com/aws/aws-sdk-go-v2/service/sso v1.1.5 h1:B7ec5wE4+3Ldkurmq0C4gfQFtElGTG+/iTpi/YPMzi4= github.com/aws/aws-sdk-go-v2/service/sso v1.1.5/go.mod h1:bpGz0tidC4y39sZkQSkpO/J0tzWCMXHbw6FZ0j1GkWM= -github.com/aws/aws-sdk-go-v2/service/sso v1.3.2 h1:b+U3WrF9ON3f32FH19geqmiod4uKcMv/q+wosQjjyyM= -github.com/aws/aws-sdk-go-v2/service/sso v1.3.2/go.mod h1:J21I6kF+d/6XHVk7kp/cx9YVD2TMD2TbLwtRGVcinXo= +github.com/aws/aws-sdk-go-v2/service/sts v1.2.2 h1:fKw6QSGcFlvZCBPYx3fo4sL0HfTmaT06ZtMHJfQQNQQ= github.com/aws/aws-sdk-go-v2/service/sts v1.2.2/go.mod h1:ssRzzJ2RZOVuKj2Vx1YE7ypfil/BIlgmQnCSW4DistU= -github.com/aws/aws-sdk-go-v2/service/sts v1.6.1 h1:1Pls85C5CFjhE3aH+h85/hyAk89kQNlAWlEQtIkaFyc= -github.com/aws/aws-sdk-go-v2/service/sts v1.6.1/go.mod h1:hLZ/AnkIKHLuPGjEiyghNEdvJ2PP0MgOxcmv9EBJ4xs= github.com/aws/smithy-go v1.0.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw= +github.com/aws/smithy-go v1.3.1 h1:xJFO4pK0y9J8fCl34uGsSJX5KNnGbdARDlA5BPhXnwE= github.com/aws/smithy-go v1.3.1/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= -github.com/aws/smithy-go v1.7.0 h1:+cLHMRrDZvQ4wk+KuQ9yH6eEg6KZEJ9RI2IkDqnygCg= -github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -467,8 +457,8 @@ github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution 
v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.7+incompatible h1:Z6O9Nhsjv+ayUEeI1IojKbYcsGdgYSNqxe1s2MYzUhQ= -github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.6+incompatible h1:oXI3Vas8TI8Eu/EjH4srKHJBVqraSzJybhxY7Om9faQ= +github.com/docker/docker v20.10.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= @@ -735,9 +725,8 @@ github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/addlicense v0.0.0-20190510175307-22550fa7c1b0/go.mod h1:QtPG26W17m+OIQgE6gQ24gC1M6pUaMBAbFrTIDtwG/E= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= @@ -1421,9 +1410,8 @@ github.com/sirupsen/logrus v1.4.2/go.mod 
h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sleepinggenius2/gosmi v0.4.3 h1:99Zwzy1Cvgsh396sw07oR2G4ab88ILGZFMxSlGWnR6o= github.com/sleepinggenius2/gosmi v0.4.3/go.mod h1:l8OniPmd3bJzw0MXP2/qh7AhP/e+bTY2CNivIhsnDT0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= @@ -1475,8 +1463,8 @@ github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 h1:Oj2e7Sae4XrOs github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod h1:qUzPVlSj2UgxJkVbH0ZwuuiR46U8RBMDT5KLY78Ifpw= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= -github.com/testcontainers/testcontainers-go v0.11.1 h1:FiYsB83LSGbiawoV8TpAZGfcCUbtaeeg1SXqEKUxh08= -github.com/testcontainers/testcontainers-go v0.11.1/go.mod h1:/V0UVq+1e7NWYoqTPog179clf0Qp9TOyp4EcXaEFQz8= +github.com/testcontainers/testcontainers-go v0.11.0 h1:HO5YOx2DYBHqcg4MzVWPj3FuHAv7USWVu94vCSsgiaM= +github.com/testcontainers/testcontainers-go v0.11.0/go.mod h1:HztBCODzuA+YpMXGK8amjO8j50jz2gcT0BOzSKUiYIs= github.com/tidwall/gjson v1.8.0 h1:Qt+orfosKn0rbNTZqHYDqBrmm3UDA4KRkv70fDzG+PQ= github.com/tidwall/gjson v1.8.0/go.mod h1:5/xDoumyyDNerp2U36lyolv46b3uF/9Bu6OfyQ9GImk= 
github.com/tidwall/match v1.0.3 h1:FQUVvBImDutD8wJLN6c5eMzWtjgONK9MwIBCOrUJKeE= diff --git a/internal/exec_unix.go b/internal/exec_unix.go index 60b606cfb5f32..0f5d3fca037db 100644 --- a/internal/exec_unix.go +++ b/internal/exec_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package internal diff --git a/internal/exec_windows.go b/internal/exec_windows.go index 7bab1baf3ac3f..708051dda3a2c 100644 --- a/internal/exec_windows.go +++ b/internal/exec_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package internal diff --git a/internal/globpath/globpath_test.go b/internal/globpath/globpath_test.go index 33779f912a027..bc286bc75419e 100644 --- a/internal/globpath/globpath_test.go +++ b/internal/globpath/globpath_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows // TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows diff --git a/internal/goplugin/noplugin.go b/internal/goplugin/noplugin.go index 089972d465196..65fcee418e388 100644 --- a/internal/goplugin/noplugin.go +++ b/internal/goplugin/noplugin.go @@ -1,3 +1,4 @@ +//go:build !goplugin // +build !goplugin package goplugin diff --git a/internal/goplugin/plugin.go b/internal/goplugin/plugin.go index 7e58ec32e92c2..3af051833b6a7 100644 --- a/internal/goplugin/plugin.go +++ b/internal/goplugin/plugin.go @@ -1,3 +1,4 @@ +//go:build goplugin // +build goplugin package goplugin diff --git a/internal/process/process_posix.go b/internal/process/process_posix.go index 7b42b7da13214..8f736bc673592 100644 --- a/internal/process/process_posix.go +++ b/internal/process/process_posix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package process diff --git a/internal/process/process_test.go b/internal/process/process_test.go index b9cad3598ce13..228f2f1e1b28d 100644 --- a/internal/process/process_test.go +++ b/internal/process/process_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package process diff --git 
a/internal/process/process_windows.go b/internal/process/process_windows.go index 0995d52469b07..3aefd20f4aa9c 100644 --- a/internal/process/process_windows.go +++ b/internal/process/process_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package process diff --git a/internal/usage.go b/internal/usage.go index 1a4b3a3496281..916b5cb86e908 100644 --- a/internal/usage.go +++ b/internal/usage.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package internal diff --git a/internal/usage_windows.go b/internal/usage_windows.go index 236e1426b345c..9a1169851cd74 100644 --- a/internal/usage_windows.go +++ b/internal/usage_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package internal diff --git a/logger/event_logger.go b/logger/event_logger.go index 44d5bce656a04..bb0672de76c5c 100644 --- a/logger/event_logger.go +++ b/logger/event_logger.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows package logger diff --git a/logger/event_logger_test.go b/logger/event_logger_test.go index 05c27b1757e87..d268252779867 100644 --- a/logger/event_logger_test.go +++ b/logger/event_logger_test.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows package logger diff --git a/plugins/inputs/bcache/bcache.go b/plugins/inputs/bcache/bcache.go index 8c21c701f3da3..3195cf4dabcbb 100644 --- a/plugins/inputs/bcache/bcache.go +++ b/plugins/inputs/bcache/bcache.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows // bcache doesn't aim for Windows diff --git a/plugins/inputs/bcache/bcache_test.go b/plugins/inputs/bcache/bcache_test.go index b9d786fa91bec..857538a8d6f72 100644 --- a/plugins/inputs/bcache/bcache_test.go +++ b/plugins/inputs/bcache/bcache_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package bcache diff --git a/plugins/inputs/bcache/bcache_windows.go b/plugins/inputs/bcache/bcache_windows.go index 9a580cc940106..faeba8888bb3b 100644 --- 
a/plugins/inputs/bcache/bcache_windows.go +++ b/plugins/inputs/bcache/bcache_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package bcache diff --git a/plugins/inputs/cgroup/cgroup_linux.go b/plugins/inputs/cgroup/cgroup_linux.go index 43aa68f233fc1..d1eda6e7a3b07 100644 --- a/plugins/inputs/cgroup/cgroup_linux.go +++ b/plugins/inputs/cgroup/cgroup_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package cgroup diff --git a/plugins/inputs/cgroup/cgroup_notlinux.go b/plugins/inputs/cgroup/cgroup_notlinux.go index 2bc227410a6e2..1c9c08ec41ac5 100644 --- a/plugins/inputs/cgroup/cgroup_notlinux.go +++ b/plugins/inputs/cgroup/cgroup_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package cgroup diff --git a/plugins/inputs/cgroup/cgroup_test.go b/plugins/inputs/cgroup/cgroup_test.go index bd7a191b31df7..ba74247eeb1f3 100644 --- a/plugins/inputs/cgroup/cgroup_test.go +++ b/plugins/inputs/cgroup/cgroup_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package cgroup diff --git a/plugins/inputs/conntrack/conntrack.go b/plugins/inputs/conntrack/conntrack.go index bf6c021c80f4a..f1b04fb0d965a 100644 --- a/plugins/inputs/conntrack/conntrack.go +++ b/plugins/inputs/conntrack/conntrack.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package conntrack diff --git a/plugins/inputs/conntrack/conntrack_notlinux.go b/plugins/inputs/conntrack/conntrack_notlinux.go index 11948731bb88d..6ad8e4a10e3c5 100644 --- a/plugins/inputs/conntrack/conntrack_notlinux.go +++ b/plugins/inputs/conntrack/conntrack_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package conntrack diff --git a/plugins/inputs/conntrack/conntrack_test.go b/plugins/inputs/conntrack/conntrack_test.go index e554f4e90d262..50f56d831791e 100644 --- a/plugins/inputs/conntrack/conntrack_test.go +++ b/plugins/inputs/conntrack/conntrack_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package conntrack diff --git 
a/plugins/inputs/diskio/diskio_linux_test.go b/plugins/inputs/diskio/diskio_linux_test.go index ede35b5befead..1a97aabf40db5 100644 --- a/plugins/inputs/diskio/diskio_linux_test.go +++ b/plugins/inputs/diskio/diskio_linux_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package diskio diff --git a/plugins/inputs/diskio/diskio_other.go b/plugins/inputs/diskio/diskio_other.go index 1c883e904f92c..458a64c13f7bb 100644 --- a/plugins/inputs/diskio/diskio_other.go +++ b/plugins/inputs/diskio/diskio_other.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package diskio diff --git a/plugins/inputs/dmcache/dmcache_linux.go b/plugins/inputs/dmcache/dmcache_linux.go index 8e8d7de918560..712e67900ba4d 100644 --- a/plugins/inputs/dmcache/dmcache_linux.go +++ b/plugins/inputs/dmcache/dmcache_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package dmcache diff --git a/plugins/inputs/dmcache/dmcache_linux_test.go b/plugins/inputs/dmcache/dmcache_linux_test.go index 30e32b1e876a4..93cd1e85e79bb 100644 --- a/plugins/inputs/dmcache/dmcache_linux_test.go +++ b/plugins/inputs/dmcache/dmcache_linux_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package dmcache diff --git a/plugins/inputs/dmcache/dmcache_notlinux.go b/plugins/inputs/dmcache/dmcache_notlinux.go index ee1065638cab7..96aa0c65712ff 100644 --- a/plugins/inputs/dmcache/dmcache_notlinux.go +++ b/plugins/inputs/dmcache/dmcache_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package dmcache diff --git a/plugins/inputs/dpdk/dpdk.go b/plugins/inputs/dpdk/dpdk.go index 293dbee90adf3..261784942232c 100644 --- a/plugins/inputs/dpdk/dpdk.go +++ b/plugins/inputs/dpdk/dpdk.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package dpdk diff --git a/plugins/inputs/dpdk/dpdk_connector.go b/plugins/inputs/dpdk/dpdk_connector.go index 1129d16d31604..9cd9c81c4362b 100644 --- a/plugins/inputs/dpdk/dpdk_connector.go +++ b/plugins/inputs/dpdk/dpdk_connector.go @@ -1,3 +1,4 @@ +//go:build 
linux // +build linux package dpdk diff --git a/plugins/inputs/dpdk/dpdk_connector_test.go b/plugins/inputs/dpdk/dpdk_connector_test.go index a322964979fe8..f5580417c3c67 100644 --- a/plugins/inputs/dpdk/dpdk_connector_test.go +++ b/plugins/inputs/dpdk/dpdk_connector_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package dpdk diff --git a/plugins/inputs/dpdk/dpdk_notlinux.go b/plugins/inputs/dpdk/dpdk_notlinux.go index a86625ff5c93f..1831b1212ae78 100644 --- a/plugins/inputs/dpdk/dpdk_notlinux.go +++ b/plugins/inputs/dpdk/dpdk_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package dpdk diff --git a/plugins/inputs/dpdk/dpdk_test.go b/plugins/inputs/dpdk/dpdk_test.go index cfee021e9e6bb..41d2da3d07777 100644 --- a/plugins/inputs/dpdk/dpdk_test.go +++ b/plugins/inputs/dpdk/dpdk_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package dpdk diff --git a/plugins/inputs/dpdk/dpdk_utils.go b/plugins/inputs/dpdk/dpdk_utils.go index 962186a424893..b7049d8365597 100644 --- a/plugins/inputs/dpdk/dpdk_utils.go +++ b/plugins/inputs/dpdk/dpdk_utils.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package dpdk diff --git a/plugins/inputs/dpdk/dpdk_utils_test.go b/plugins/inputs/dpdk/dpdk_utils_test.go index 6697e9ab38113..87e8a6c8248c3 100644 --- a/plugins/inputs/dpdk/dpdk_utils_test.go +++ b/plugins/inputs/dpdk/dpdk_utils_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package dpdk diff --git a/plugins/inputs/ethtool/ethtool_linux.go b/plugins/inputs/ethtool/ethtool_linux.go index 08e21db50dede..6c0116e6e8089 100644 --- a/plugins/inputs/ethtool/ethtool_linux.go +++ b/plugins/inputs/ethtool/ethtool_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package ethtool diff --git a/plugins/inputs/ethtool/ethtool_notlinux.go b/plugins/inputs/ethtool/ethtool_notlinux.go index b022e0a46bb72..ce149ecd6e69c 100644 --- a/plugins/inputs/ethtool/ethtool_notlinux.go +++ b/plugins/inputs/ethtool/ethtool_notlinux.go @@ -1,3 +1,4 @@ 
+//go:build !linux // +build !linux package ethtool diff --git a/plugins/inputs/ethtool/ethtool_test.go b/plugins/inputs/ethtool/ethtool_test.go index 87bc136d2db11..14cf14d811683 100644 --- a/plugins/inputs/ethtool/ethtool_test.go +++ b/plugins/inputs/ethtool/ethtool_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package ethtool diff --git a/plugins/inputs/exec/exec_test.go b/plugins/inputs/exec/exec_test.go index bdd11433d1ab6..d0647476c77ae 100644 --- a/plugins/inputs/exec/exec_test.go +++ b/plugins/inputs/exec/exec_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows // TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows diff --git a/plugins/inputs/execd/execd_posix.go b/plugins/inputs/execd/execd_posix.go index 9593aaba0af29..a90b1a92dddf5 100644 --- a/plugins/inputs/execd/execd_posix.go +++ b/plugins/inputs/execd/execd_posix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package execd diff --git a/plugins/inputs/execd/execd_windows.go b/plugins/inputs/execd/execd_windows.go index 15e6798f2389b..9b1f22204bdc4 100644 --- a/plugins/inputs/execd/execd_windows.go +++ b/plugins/inputs/execd/execd_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package execd diff --git a/plugins/inputs/execd/shim/goshim_posix.go b/plugins/inputs/execd/shim/goshim_posix.go index 4e4a04f141b65..8d7faa2268878 100644 --- a/plugins/inputs/execd/shim/goshim_posix.go +++ b/plugins/inputs/execd/shim/goshim_posix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package shim diff --git a/plugins/inputs/execd/shim/goshim_windows.go b/plugins/inputs/execd/shim/goshim_windows.go index 317f8a2f3d4cb..90adfeff6f6c9 100644 --- a/plugins/inputs/execd/shim/goshim_windows.go +++ b/plugins/inputs/execd/shim/goshim_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package shim diff --git a/plugins/inputs/execd/shim/shim_posix_test.go b/plugins/inputs/execd/shim/shim_posix_test.go index 
75484c45c78a0..36e0afcd83167 100644 --- a/plugins/inputs/execd/shim/shim_posix_test.go +++ b/plugins/inputs/execd/shim/shim_posix_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package shim diff --git a/plugins/inputs/file/file_test.go b/plugins/inputs/file/file_test.go index e633559236bd2..ab09753ca1145 100644 --- a/plugins/inputs/file/file_test.go +++ b/plugins/inputs/file/file_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows // TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows diff --git a/plugins/inputs/filecount/filecount_test.go b/plugins/inputs/filecount/filecount_test.go index 74a3e2ec391c5..d02c28fb6f170 100644 --- a/plugins/inputs/filecount/filecount_test.go +++ b/plugins/inputs/filecount/filecount_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows // TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows diff --git a/plugins/inputs/filecount/filesystem_helpers_test.go b/plugins/inputs/filecount/filesystem_helpers_test.go index 8a6d9cf2aa035..a3a3310d3fb4e 100644 --- a/plugins/inputs/filecount/filesystem_helpers_test.go +++ b/plugins/inputs/filecount/filesystem_helpers_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows // TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows diff --git a/plugins/inputs/filestat/filestat_test.go b/plugins/inputs/filestat/filestat_test.go index ea1bee47e4fb4..ac2a9f9a9f75b 100644 --- a/plugins/inputs/filestat/filestat_test.go +++ b/plugins/inputs/filestat/filestat_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows // TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go index 557639027ff03..40917bba1bc39 100644 --- a/plugins/inputs/http_response/http_response_test.go +++ 
b/plugins/inputs/http_response/http_response_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows // TODO: Windows - should be enabled for Windows when https://github.com/influxdata/telegraf/issues/8451 is fixed diff --git a/plugins/inputs/infiniband/infiniband_linux.go b/plugins/inputs/infiniband/infiniband_linux.go index 224d35bc2fce0..2868c683e7ebb 100644 --- a/plugins/inputs/infiniband/infiniband_linux.go +++ b/plugins/inputs/infiniband/infiniband_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package infiniband diff --git a/plugins/inputs/infiniband/infiniband_notlinux.go b/plugins/inputs/infiniband/infiniband_notlinux.go index 5b19672d975d8..8ad6731c17bd7 100644 --- a/plugins/inputs/infiniband/infiniband_notlinux.go +++ b/plugins/inputs/infiniband/infiniband_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package infiniband diff --git a/plugins/inputs/infiniband/infiniband_test.go b/plugins/inputs/infiniband/infiniband_test.go index 7f747eb5fd89f..c382a1fdf9dd0 100644 --- a/plugins/inputs/infiniband/infiniband_test.go +++ b/plugins/inputs/infiniband/infiniband_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package infiniband diff --git a/plugins/inputs/intel_powerstat/file.go b/plugins/inputs/intel_powerstat/file.go index 7953726fd9ba8..a07dd57e16a57 100644 --- a/plugins/inputs/intel_powerstat/file.go +++ b/plugins/inputs/intel_powerstat/file.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package intel_powerstat diff --git a/plugins/inputs/intel_powerstat/intel_powerstat.go b/plugins/inputs/intel_powerstat/intel_powerstat.go index 9340fdec814b1..181e7642da4b8 100644 --- a/plugins/inputs/intel_powerstat/intel_powerstat.go +++ b/plugins/inputs/intel_powerstat/intel_powerstat.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package intel_powerstat diff --git a/plugins/inputs/intel_powerstat/intel_powerstat_notlinux.go b/plugins/inputs/intel_powerstat/intel_powerstat_notlinux.go index 
f46755cee92b7..256e64970094e 100644 --- a/plugins/inputs/intel_powerstat/intel_powerstat_notlinux.go +++ b/plugins/inputs/intel_powerstat/intel_powerstat_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package intel_powerstat diff --git a/plugins/inputs/intel_powerstat/intel_powerstat_test.go b/plugins/inputs/intel_powerstat/intel_powerstat_test.go index d65756595927e..ce01e77997cdc 100644 --- a/plugins/inputs/intel_powerstat/intel_powerstat_test.go +++ b/plugins/inputs/intel_powerstat/intel_powerstat_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package intel_powerstat diff --git a/plugins/inputs/intel_powerstat/msr.go b/plugins/inputs/intel_powerstat/msr.go index 8d39164d6e783..6c19b56eb7cc5 100644 --- a/plugins/inputs/intel_powerstat/msr.go +++ b/plugins/inputs/intel_powerstat/msr.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package intel_powerstat diff --git a/plugins/inputs/intel_powerstat/msr_test.go b/plugins/inputs/intel_powerstat/msr_test.go index 945716e15a105..b03d2b00960a9 100644 --- a/plugins/inputs/intel_powerstat/msr_test.go +++ b/plugins/inputs/intel_powerstat/msr_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package intel_powerstat diff --git a/plugins/inputs/intel_powerstat/rapl.go b/plugins/inputs/intel_powerstat/rapl.go index 1e4b465fd7974..32d60ac89c705 100644 --- a/plugins/inputs/intel_powerstat/rapl.go +++ b/plugins/inputs/intel_powerstat/rapl.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package intel_powerstat diff --git a/plugins/inputs/intel_powerstat/rapl_test.go b/plugins/inputs/intel_powerstat/rapl_test.go index 551f06f890ea4..5333ec13aaa79 100644 --- a/plugins/inputs/intel_powerstat/rapl_test.go +++ b/plugins/inputs/intel_powerstat/rapl_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package intel_powerstat diff --git a/plugins/inputs/intel_powerstat/unit_converter.go b/plugins/inputs/intel_powerstat/unit_converter.go index 43dc79e6efc4a..7dd8c0d0d1aa0 100644 --- 
a/plugins/inputs/intel_powerstat/unit_converter.go +++ b/plugins/inputs/intel_powerstat/unit_converter.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package intel_powerstat diff --git a/plugins/inputs/intel_rdt/intel_rdt.go b/plugins/inputs/intel_rdt/intel_rdt.go index 69cc914227fc8..89370062d730e 100644 --- a/plugins/inputs/intel_rdt/intel_rdt.go +++ b/plugins/inputs/intel_rdt/intel_rdt.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package intel_rdt diff --git a/plugins/inputs/intel_rdt/intel_rdt_test.go b/plugins/inputs/intel_rdt/intel_rdt_test.go index 7e876425724ec..1eecbc5018125 100644 --- a/plugins/inputs/intel_rdt/intel_rdt_test.go +++ b/plugins/inputs/intel_rdt/intel_rdt_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package intel_rdt diff --git a/plugins/inputs/intel_rdt/intel_rdt_windows.go b/plugins/inputs/intel_rdt/intel_rdt_windows.go index e3ab0978fb374..64f9ebbe94b68 100644 --- a/plugins/inputs/intel_rdt/intel_rdt_windows.go +++ b/plugins/inputs/intel_rdt/intel_rdt_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package intel_rdt diff --git a/plugins/inputs/intel_rdt/processes.go b/plugins/inputs/intel_rdt/processes.go index ff86a4e6b745c..dd172b6d92dd2 100644 --- a/plugins/inputs/intel_rdt/processes.go +++ b/plugins/inputs/intel_rdt/processes.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package intel_rdt diff --git a/plugins/inputs/intel_rdt/publisher.go b/plugins/inputs/intel_rdt/publisher.go index a01d730382da9..a567e1aacb1fa 100644 --- a/plugins/inputs/intel_rdt/publisher.go +++ b/plugins/inputs/intel_rdt/publisher.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package intel_rdt diff --git a/plugins/inputs/intel_rdt/publisher_test.go b/plugins/inputs/intel_rdt/publisher_test.go index 5248ede7a16db..7db71e9ac5afa 100644 --- a/plugins/inputs/intel_rdt/publisher_test.go +++ b/plugins/inputs/intel_rdt/publisher_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build 
!windows package intel_rdt diff --git a/plugins/inputs/iptables/iptables.go b/plugins/inputs/iptables/iptables.go index e56f8b31d5725..89924b88de7c8 100644 --- a/plugins/inputs/iptables/iptables.go +++ b/plugins/inputs/iptables/iptables.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package iptables diff --git a/plugins/inputs/iptables/iptables_nocompile.go b/plugins/inputs/iptables/iptables_nocompile.go index f71b4208e62fb..17c0eaced90e5 100644 --- a/plugins/inputs/iptables/iptables_nocompile.go +++ b/plugins/inputs/iptables/iptables_nocompile.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package iptables diff --git a/plugins/inputs/iptables/iptables_test.go b/plugins/inputs/iptables/iptables_test.go index 681d8bbfc130e..4c62ef6d6a86a 100644 --- a/plugins/inputs/iptables/iptables_test.go +++ b/plugins/inputs/iptables/iptables_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package iptables diff --git a/plugins/inputs/ipvs/ipvs.go b/plugins/inputs/ipvs/ipvs.go index 65db5efe3bf7f..7dea5240aab0f 100644 --- a/plugins/inputs/ipvs/ipvs.go +++ b/plugins/inputs/ipvs/ipvs.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package ipvs diff --git a/plugins/inputs/ipvs/ipvs_notlinux.go b/plugins/inputs/ipvs/ipvs_notlinux.go index bbbb1240b62a8..b46035f2c2b3c 100644 --- a/plugins/inputs/ipvs/ipvs_notlinux.go +++ b/plugins/inputs/ipvs/ipvs_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package ipvs diff --git a/plugins/inputs/kernel/kernel.go b/plugins/inputs/kernel/kernel.go index 404c62d88c2b8..22311e9a0f12d 100644 --- a/plugins/inputs/kernel/kernel.go +++ b/plugins/inputs/kernel/kernel.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package kernel diff --git a/plugins/inputs/kernel/kernel_notlinux.go b/plugins/inputs/kernel/kernel_notlinux.go index 05f6e55c453c5..838a97071a6d4 100644 --- a/plugins/inputs/kernel/kernel_notlinux.go +++ b/plugins/inputs/kernel/kernel_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build 
!linux package kernel diff --git a/plugins/inputs/kernel/kernel_test.go b/plugins/inputs/kernel/kernel_test.go index 2068237d5b60f..462624c2eb40d 100644 --- a/plugins/inputs/kernel/kernel_test.go +++ b/plugins/inputs/kernel/kernel_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package kernel diff --git a/plugins/inputs/kernel_vmstat/kernel_vmstat.go b/plugins/inputs/kernel_vmstat/kernel_vmstat.go index 66e7c7d664748..2019e0cbfddb3 100644 --- a/plugins/inputs/kernel_vmstat/kernel_vmstat.go +++ b/plugins/inputs/kernel_vmstat/kernel_vmstat.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package kernel_vmstat diff --git a/plugins/inputs/kernel_vmstat/kernel_vmstat_notlinux.go b/plugins/inputs/kernel_vmstat/kernel_vmstat_notlinux.go index 11a5d2e553dff..d687b13a9e72d 100644 --- a/plugins/inputs/kernel_vmstat/kernel_vmstat_notlinux.go +++ b/plugins/inputs/kernel_vmstat/kernel_vmstat_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package kernel_vmstat diff --git a/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go b/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go index eca873ff71896..6bbb9d7b5b12f 100644 --- a/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go +++ b/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package kernel_vmstat diff --git a/plugins/inputs/logparser/logparser.go b/plugins/inputs/logparser/logparser.go index 5fec865eaa8d7..83f5abd210bdd 100644 --- a/plugins/inputs/logparser/logparser.go +++ b/plugins/inputs/logparser/logparser.go @@ -1,3 +1,4 @@ +//go:build !solaris // +build !solaris package logparser diff --git a/plugins/inputs/logparser/logparser_solaris.go b/plugins/inputs/logparser/logparser_solaris.go index 28afe26772846..da482b97d27be 100644 --- a/plugins/inputs/logparser/logparser_solaris.go +++ b/plugins/inputs/logparser/logparser_solaris.go @@ -1,3 +1,4 @@ +//go:build solaris // +build solaris package logparser diff --git 
a/plugins/inputs/lustre2/lustre2.go b/plugins/inputs/lustre2/lustre2.go index 5327386339f84..00aa288b316a8 100644 --- a/plugins/inputs/lustre2/lustre2.go +++ b/plugins/inputs/lustre2/lustre2.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows // Package lustre2 (doesn't aim for Windows) diff --git a/plugins/inputs/lustre2/lustre2_test.go b/plugins/inputs/lustre2/lustre2_test.go index e89c33b5a46e3..52c7e87f08fc6 100644 --- a/plugins/inputs/lustre2/lustre2_test.go +++ b/plugins/inputs/lustre2/lustre2_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package lustre2 diff --git a/plugins/inputs/lustre2/lustre2_windows.go b/plugins/inputs/lustre2/lustre2_windows.go index 0c4d970608e09..cd3aea1b534f1 100644 --- a/plugins/inputs/lustre2/lustre2_windows.go +++ b/plugins/inputs/lustre2/lustre2_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package lustre2 diff --git a/plugins/inputs/mongodb/mongodb_server_test.go b/plugins/inputs/mongodb/mongodb_server_test.go index 2cf58689a6eab..64fb191639105 100644 --- a/plugins/inputs/mongodb/mongodb_server_test.go +++ b/plugins/inputs/mongodb/mongodb_server_test.go @@ -1,3 +1,4 @@ +//go:build integration // +build integration package mongodb diff --git a/plugins/inputs/mongodb/mongodb_test.go b/plugins/inputs/mongodb/mongodb_test.go index 9484118dd19ab..24aa2fe3e0d04 100644 --- a/plugins/inputs/mongodb/mongodb_test.go +++ b/plugins/inputs/mongodb/mongodb_test.go @@ -1,3 +1,4 @@ +//go:build integration // +build integration package mongodb diff --git a/plugins/inputs/nats/nats.go b/plugins/inputs/nats/nats.go index 53f688bb3bcd9..c2adab29b324d 100644 --- a/plugins/inputs/nats/nats.go +++ b/plugins/inputs/nats/nats.go @@ -1,3 +1,4 @@ +//go:build !freebsd || (freebsd && cgo) // +build !freebsd freebsd,cgo package nats diff --git a/plugins/inputs/nats/nats_freebsd.go b/plugins/inputs/nats/nats_freebsd.go index 08d08ba760df0..f50ba2cfcf678 100644 --- a/plugins/inputs/nats/nats_freebsd.go +++ 
b/plugins/inputs/nats/nats_freebsd.go @@ -1,3 +1,4 @@ +//go:build freebsd && !cgo // +build freebsd,!cgo package nats diff --git a/plugins/inputs/nats/nats_test.go b/plugins/inputs/nats/nats_test.go index 7207df94cfd02..135951405feda 100644 --- a/plugins/inputs/nats/nats_test.go +++ b/plugins/inputs/nats/nats_test.go @@ -1,3 +1,4 @@ +//go:build !freebsd || (freebsd && cgo) // +build !freebsd freebsd,cgo package nats diff --git a/plugins/inputs/phpfpm/phpfpm_test.go b/plugins/inputs/phpfpm/phpfpm_test.go index 50d8d604efb5b..d51c576aad7f0 100644 --- a/plugins/inputs/phpfpm/phpfpm_test.go +++ b/plugins/inputs/phpfpm/phpfpm_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows // TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows diff --git a/plugins/inputs/ping/ping_notwindows.go b/plugins/inputs/ping/ping_notwindows.go index a014a8237e8e7..f6bd751c2a4e3 100644 --- a/plugins/inputs/ping/ping_notwindows.go +++ b/plugins/inputs/ping/ping_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package ping diff --git a/plugins/inputs/ping/ping_test.go b/plugins/inputs/ping/ping_test.go index 895b9c1fdf5b9..7faba097c4562 100644 --- a/plugins/inputs/ping/ping_test.go +++ b/plugins/inputs/ping/ping_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package ping diff --git a/plugins/inputs/ping/ping_windows.go b/plugins/inputs/ping/ping_windows.go index f53d6f09a7373..1d3d933e7736b 100644 --- a/plugins/inputs/ping/ping_windows.go +++ b/plugins/inputs/ping/ping_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package ping diff --git a/plugins/inputs/ping/ping_windows_test.go b/plugins/inputs/ping/ping_windows_test.go index 0986d58bc74a8..6df8af3732a5f 100644 --- a/plugins/inputs/ping/ping_windows_test.go +++ b/plugins/inputs/ping/ping_windows_test.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package ping diff --git a/plugins/inputs/postfix/postfix.go 
b/plugins/inputs/postfix/postfix.go index f72474a114f94..e2d271f51cba1 100644 --- a/plugins/inputs/postfix/postfix.go +++ b/plugins/inputs/postfix/postfix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows // postfix doesn't aim for Windows diff --git a/plugins/inputs/postfix/postfix_test.go b/plugins/inputs/postfix/postfix_test.go index ad997eebdbbe7..782a0c78c95b9 100644 --- a/plugins/inputs/postfix/postfix_test.go +++ b/plugins/inputs/postfix/postfix_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package postfix diff --git a/plugins/inputs/postfix/postfix_windows.go b/plugins/inputs/postfix/postfix_windows.go index 122c1543da55d..3a2c5e5cb3619 100644 --- a/plugins/inputs/postfix/postfix_windows.go +++ b/plugins/inputs/postfix/postfix_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package postfix diff --git a/plugins/inputs/postfix/stat_ctim.go b/plugins/inputs/postfix/stat_ctim.go index 456df5ffd4dd2..06ddccb178fce 100644 --- a/plugins/inputs/postfix/stat_ctim.go +++ b/plugins/inputs/postfix/stat_ctim.go @@ -1,3 +1,4 @@ +//go:build dragonfly || linux || netbsd || openbsd || solaris // +build dragonfly linux netbsd openbsd solaris package postfix diff --git a/plugins/inputs/postfix/stat_ctimespec.go b/plugins/inputs/postfix/stat_ctimespec.go index 40e0de6cc4a40..03f4e0a435f2c 100644 --- a/plugins/inputs/postfix/stat_ctimespec.go +++ b/plugins/inputs/postfix/stat_ctimespec.go @@ -1,3 +1,4 @@ +//go:build darwin || freebsd // +build darwin freebsd package postfix diff --git a/plugins/inputs/postfix/stat_none.go b/plugins/inputs/postfix/stat_none.go index d9b67b1663af8..c1ca6a41c662f 100644 --- a/plugins/inputs/postfix/stat_none.go +++ b/plugins/inputs/postfix/stat_none.go @@ -1,3 +1,4 @@ +//go:build !dragonfly && !linux && !netbsd && !openbsd && !solaris && !darwin && !freebsd // +build !dragonfly,!linux,!netbsd,!openbsd,!solaris,!darwin,!freebsd package postfix diff --git a/plugins/inputs/processes/processes_notwindows.go 
b/plugins/inputs/processes/processes_notwindows.go index 9faec83afa7d0..3c685cf1ebf7f 100644 --- a/plugins/inputs/processes/processes_notwindows.go +++ b/plugins/inputs/processes/processes_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package processes diff --git a/plugins/inputs/processes/processes_test.go b/plugins/inputs/processes/processes_test.go index de04fecb56fc1..144b80f3fc1ec 100644 --- a/plugins/inputs/processes/processes_test.go +++ b/plugins/inputs/processes/processes_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package processes diff --git a/plugins/inputs/processes/processes_windows.go b/plugins/inputs/processes/processes_windows.go index 567373c7c7260..f798a1668c738 100644 --- a/plugins/inputs/processes/processes_windows.go +++ b/plugins/inputs/processes/processes_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package processes diff --git a/plugins/inputs/procstat/native_finder_notwindows.go b/plugins/inputs/procstat/native_finder_notwindows.go index 9d7409ba1df8e..528b083ae628b 100644 --- a/plugins/inputs/procstat/native_finder_notwindows.go +++ b/plugins/inputs/procstat/native_finder_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package procstat diff --git a/plugins/inputs/procstat/win_service_notwindows.go b/plugins/inputs/procstat/win_service_notwindows.go index a0a776d33736f..b7efcee17cdc1 100644 --- a/plugins/inputs/procstat/win_service_notwindows.go +++ b/plugins/inputs/procstat/win_service_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package procstat diff --git a/plugins/inputs/procstat/win_service_windows.go b/plugins/inputs/procstat/win_service_windows.go index 06dffc8472089..5d9c196e388c0 100644 --- a/plugins/inputs/procstat/win_service_windows.go +++ b/plugins/inputs/procstat/win_service_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package procstat diff --git a/plugins/inputs/ras/ras.go 
b/plugins/inputs/ras/ras.go index a8599c4a78d0f..a8d4ba727d7df 100644 --- a/plugins/inputs/ras/ras.go +++ b/plugins/inputs/ras/ras.go @@ -1,3 +1,4 @@ +//go:build linux && (386 || amd64 || arm || arm64) // +build linux // +build 386 amd64 arm arm64 diff --git a/plugins/inputs/ras/ras_notlinux.go b/plugins/inputs/ras/ras_notlinux.go index 74f0aaf9fc59f..b0795fd794f6f 100644 --- a/plugins/inputs/ras/ras_notlinux.go +++ b/plugins/inputs/ras/ras_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux || (linux && !386 && !amd64 && !arm && !arm64) // +build !linux linux,!386,!amd64,!arm,!arm64 package ras diff --git a/plugins/inputs/ras/ras_test.go b/plugins/inputs/ras/ras_test.go index a90258bb4423b..656200fde95cc 100644 --- a/plugins/inputs/ras/ras_test.go +++ b/plugins/inputs/ras/ras_test.go @@ -1,3 +1,4 @@ +//go:build linux && (386 || amd64 || arm || arm64) // +build linux // +build 386 amd64 arm arm64 diff --git a/plugins/inputs/rethinkdb/rethinkdb_server_test.go b/plugins/inputs/rethinkdb/rethinkdb_server_test.go index 82ff292804a8c..0119131900b61 100644 --- a/plugins/inputs/rethinkdb/rethinkdb_server_test.go +++ b/plugins/inputs/rethinkdb/rethinkdb_server_test.go @@ -1,3 +1,4 @@ +//go:build integration // +build integration package rethinkdb diff --git a/plugins/inputs/rethinkdb/rethinkdb_test.go b/plugins/inputs/rethinkdb/rethinkdb_test.go index 9a09864cad91a..651042ab13783 100644 --- a/plugins/inputs/rethinkdb/rethinkdb_test.go +++ b/plugins/inputs/rethinkdb/rethinkdb_test.go @@ -1,3 +1,4 @@ +//go:build integration // +build integration package rethinkdb diff --git a/plugins/inputs/riemann_listener/riemann_listener.go b/plugins/inputs/riemann_listener/riemann_listener.go index 24bdd11540e1b..a38d5989cb5d0 100644 --- a/plugins/inputs/riemann_listener/riemann_listener.go +++ b/plugins/inputs/riemann_listener/riemann_listener.go @@ -368,7 +368,7 @@ func (rsl *RiemannSocketListener) Start(acc telegraf.Accumulator) error { // Handle cancellations from the process func 
processOsSignals(cancelFunc context.CancelFunc) { - signalChan := make(chan os.Signal) + signalChan := make(chan os.Signal, 1) signal.Notify(signalChan, os.Interrupt) for { sig := <-signalChan diff --git a/plugins/inputs/sensors/sensors.go b/plugins/inputs/sensors/sensors.go index d3a8ba762f379..f2590c105272a 100644 --- a/plugins/inputs/sensors/sensors.go +++ b/plugins/inputs/sensors/sensors.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package sensors diff --git a/plugins/inputs/sensors/sensors_notlinux.go b/plugins/inputs/sensors/sensors_notlinux.go index 62a6211598f4e..424e96181b46b 100644 --- a/plugins/inputs/sensors/sensors_notlinux.go +++ b/plugins/inputs/sensors/sensors_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package sensors diff --git a/plugins/inputs/sensors/sensors_test.go b/plugins/inputs/sensors/sensors_test.go index 6bf1b616cb985..be4cace6eab79 100644 --- a/plugins/inputs/sensors/sensors_test.go +++ b/plugins/inputs/sensors/sensors_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package sensors diff --git a/plugins/inputs/snmp/snmp_mocks_generate.go b/plugins/inputs/snmp/snmp_mocks_generate.go index 7227771a7e4fa..f87f9029b0d06 100644 --- a/plugins/inputs/snmp/snmp_mocks_generate.go +++ b/plugins/inputs/snmp/snmp_mocks_generate.go @@ -1,3 +1,4 @@ +//go:build generate // +build generate package main diff --git a/plugins/inputs/sql/drivers_sqlite.go b/plugins/inputs/sql/drivers_sqlite.go index 26cf7e08b5170..945e2b8425a3b 100644 --- a/plugins/inputs/sql/drivers_sqlite.go +++ b/plugins/inputs/sql/drivers_sqlite.go @@ -1,4 +1,7 @@ -// +build linux,freebsd,darwin +//go:build linux && freebsd && darwin && (!mips || !mips64) +// +build linux +// +build freebsd +// +build darwin // +build !mips !mips64 package sql diff --git a/plugins/inputs/synproxy/synproxy_linux.go b/plugins/inputs/synproxy/synproxy_linux.go index bcc9729384282..93cd26e3343f3 100644 --- a/plugins/inputs/synproxy/synproxy_linux.go +++ 
b/plugins/inputs/synproxy/synproxy_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package synproxy diff --git a/plugins/inputs/synproxy/synproxy_notlinux.go b/plugins/inputs/synproxy/synproxy_notlinux.go index 71a223644d8ed..f12fc70656eba 100644 --- a/plugins/inputs/synproxy/synproxy_notlinux.go +++ b/plugins/inputs/synproxy/synproxy_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package synproxy diff --git a/plugins/inputs/synproxy/synproxy_test.go b/plugins/inputs/synproxy/synproxy_test.go index fc5d67d6a064a..dd733253635b8 100644 --- a/plugins/inputs/synproxy/synproxy_test.go +++ b/plugins/inputs/synproxy/synproxy_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package synproxy diff --git a/plugins/inputs/sysstat/sysstat.go b/plugins/inputs/sysstat/sysstat.go index 01b4db9fa4af9..7e69ff41ccdf2 100644 --- a/plugins/inputs/sysstat/sysstat.go +++ b/plugins/inputs/sysstat/sysstat.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package sysstat diff --git a/plugins/inputs/sysstat/sysstat_interval_test.go b/plugins/inputs/sysstat/sysstat_interval_test.go index 972eb9af936de..f714ec10b1c36 100644 --- a/plugins/inputs/sysstat/sysstat_interval_test.go +++ b/plugins/inputs/sysstat/sysstat_interval_test.go @@ -1,5 +1,5 @@ -// +build !race -// +build linux +//go:build !race && linux +// +build !race,linux package sysstat diff --git a/plugins/inputs/sysstat/sysstat_notlinux.go b/plugins/inputs/sysstat/sysstat_notlinux.go index e97e71e78280c..6b5dd6fcf18cb 100644 --- a/plugins/inputs/sysstat/sysstat_notlinux.go +++ b/plugins/inputs/sysstat/sysstat_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package sysstat diff --git a/plugins/inputs/sysstat/sysstat_test.go b/plugins/inputs/sysstat/sysstat_test.go index 1766130391bbb..64b596bb329ba 100644 --- a/plugins/inputs/sysstat/sysstat_test.go +++ b/plugins/inputs/sysstat/sysstat_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package sysstat diff --git 
a/plugins/inputs/systemd_units/systemd_units_notlinux.go b/plugins/inputs/systemd_units/systemd_units_notlinux.go index f53cea3de6eba..32f5b97cc37ec 100644 --- a/plugins/inputs/systemd_units/systemd_units_notlinux.go +++ b/plugins/inputs/systemd_units/systemd_units_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package systemd_units diff --git a/plugins/inputs/tail/tail.go b/plugins/inputs/tail/tail.go index d84c09ff8d3c2..d5bda84732ad8 100644 --- a/plugins/inputs/tail/tail.go +++ b/plugins/inputs/tail/tail.go @@ -1,3 +1,4 @@ +//go:build !solaris // +build !solaris package tail diff --git a/plugins/inputs/tail/tail_solaris.go b/plugins/inputs/tail/tail_solaris.go index 802088da28248..093dd16a06c23 100644 --- a/plugins/inputs/tail/tail_solaris.go +++ b/plugins/inputs/tail/tail_solaris.go @@ -1,5 +1,6 @@ // Skipping plugin on Solaris due to fsnotify support // +//go:build solaris // +build solaris package tail diff --git a/plugins/inputs/varnish/varnish.go b/plugins/inputs/varnish/varnish.go index e4f18bee42ed3..d9872b9d81af7 100644 --- a/plugins/inputs/varnish/varnish.go +++ b/plugins/inputs/varnish/varnish.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package varnish diff --git a/plugins/inputs/varnish/varnish_test.go b/plugins/inputs/varnish/varnish_test.go index 4ba9e941a52ee..088c08378c1ef 100644 --- a/plugins/inputs/varnish/varnish_test.go +++ b/plugins/inputs/varnish/varnish_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package varnish diff --git a/plugins/inputs/varnish/varnish_windows.go b/plugins/inputs/varnish/varnish_windows.go index 0c85c106f2b4f..9fed7dfc2a3c8 100644 --- a/plugins/inputs/varnish/varnish_windows.go +++ b/plugins/inputs/varnish/varnish_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package varnish diff --git a/plugins/inputs/win_eventlog/event.go b/plugins/inputs/win_eventlog/event.go index 2169ce8b490b3..86ddefdcb95e0 100644 --- a/plugins/inputs/win_eventlog/event.go +++ 
b/plugins/inputs/win_eventlog/event.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows //revive:disable-next-line:var-naming // Package win_eventlog Input plugin to collect Windows Event Log messages diff --git a/plugins/inputs/win_eventlog/syscall_windows.go b/plugins/inputs/win_eventlog/syscall_windows.go index df02913eee2af..d7bc07d0a5d42 100644 --- a/plugins/inputs/win_eventlog/syscall_windows.go +++ b/plugins/inputs/win_eventlog/syscall_windows.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows //revive:disable-next-line:var-naming // Package win_eventlog Input plugin to collect Windows Event Log messages diff --git a/plugins/inputs/win_eventlog/util.go b/plugins/inputs/win_eventlog/util.go index 7435cdb09ceaf..276e7514228e0 100644 --- a/plugins/inputs/win_eventlog/util.go +++ b/plugins/inputs/win_eventlog/util.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows //revive:disable-next-line:var-naming // Package win_eventlog Input plugin to collect Windows Event Log messages diff --git a/plugins/inputs/win_eventlog/util_test.go b/plugins/inputs/win_eventlog/util_test.go index ce7428dd391d2..1dc90cc2326d3 100644 --- a/plugins/inputs/win_eventlog/util_test.go +++ b/plugins/inputs/win_eventlog/util_test.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows //revive:disable-next-line:var-naming // Package win_eventlog Input plugin to collect Windows Event Log messages diff --git a/plugins/inputs/win_eventlog/win_eventlog.go b/plugins/inputs/win_eventlog/win_eventlog.go index 8d0efe3119d97..2ee303d483530 100644 --- a/plugins/inputs/win_eventlog/win_eventlog.go +++ b/plugins/inputs/win_eventlog/win_eventlog.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows //revive:disable-next-line:var-naming // Package win_eventlog Input plugin to collect Windows Event Log messages diff --git a/plugins/inputs/win_eventlog/win_eventlog_notwindows.go 
b/plugins/inputs/win_eventlog/win_eventlog_notwindows.go index 005077aa64c7d..e78ad6133b367 100644 --- a/plugins/inputs/win_eventlog/win_eventlog_notwindows.go +++ b/plugins/inputs/win_eventlog/win_eventlog_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows //revive:disable-next-line:var-naming diff --git a/plugins/inputs/win_eventlog/win_eventlog_test.go b/plugins/inputs/win_eventlog/win_eventlog_test.go index 9f922431ed776..bd6a434f40088 100644 --- a/plugins/inputs/win_eventlog/win_eventlog_test.go +++ b/plugins/inputs/win_eventlog/win_eventlog_test.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows //revive:disable-next-line:var-naming // Package win_eventlog Input plugin to collect Windows Event Log messages diff --git a/plugins/inputs/win_eventlog/zsyscall_windows.go b/plugins/inputs/win_eventlog/zsyscall_windows.go index 5c7b0a504b0bf..34c17471691e8 100644 --- a/plugins/inputs/win_eventlog/zsyscall_windows.go +++ b/plugins/inputs/win_eventlog/zsyscall_windows.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows //revive:disable-next-line:var-naming // Package win_eventlog Input plugin to collect Windows Event Log messages diff --git a/plugins/inputs/win_perf_counters/kernel32.go b/plugins/inputs/win_perf_counters/kernel32.go index 9cdadedc873bd..09cbd4be5f182 100644 --- a/plugins/inputs/win_perf_counters/kernel32.go +++ b/plugins/inputs/win_perf_counters/kernel32.go @@ -28,6 +28,7 @@ // Joseph Watson // Kevin Pors +//go:build windows // +build windows package win_perf_counters diff --git a/plugins/inputs/win_perf_counters/pdh.go b/plugins/inputs/win_perf_counters/pdh.go index 3a24761b9d593..d4e5f14a1c267 100644 --- a/plugins/inputs/win_perf_counters/pdh.go +++ b/plugins/inputs/win_perf_counters/pdh.go @@ -28,6 +28,7 @@ // Joseph Watson // Kevin Pors +//go:build windows // +build windows package win_perf_counters diff --git a/plugins/inputs/win_perf_counters/pdh_386.go 
b/plugins/inputs/win_perf_counters/pdh_386.go index 134d15c8d1461..ec572db72447e 100644 --- a/plugins/inputs/win_perf_counters/pdh_386.go +++ b/plugins/inputs/win_perf_counters/pdh_386.go @@ -28,6 +28,7 @@ // Joseph Watson // Kevin Pors +//go:build windows // +build windows package win_perf_counters diff --git a/plugins/inputs/win_perf_counters/pdh_amd64.go b/plugins/inputs/win_perf_counters/pdh_amd64.go index ff3b39335bcd4..1afedc317260e 100644 --- a/plugins/inputs/win_perf_counters/pdh_amd64.go +++ b/plugins/inputs/win_perf_counters/pdh_amd64.go @@ -28,6 +28,7 @@ // Joseph Watson // Kevin Pors +//go:build windows // +build windows package win_perf_counters diff --git a/plugins/inputs/win_perf_counters/performance_query.go b/plugins/inputs/win_perf_counters/performance_query.go index a59f96b84dc43..ab130a41dec3f 100644 --- a/plugins/inputs/win_perf_counters/performance_query.go +++ b/plugins/inputs/win_perf_counters/performance_query.go @@ -1,4 +1,5 @@ // Go API over pdh syscalls +//go:build windows // +build windows package win_perf_counters diff --git a/plugins/inputs/win_perf_counters/win_perf_counters.go b/plugins/inputs/win_perf_counters/win_perf_counters.go index 4bcbbfb1b2318..3a74e34a5228a 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package win_perf_counters diff --git a/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go b/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go index 43b20eb611577..a5ae58370ab4a 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package win_perf_counters diff --git a/plugins/inputs/win_perf_counters/win_perf_counters_notwindows.go 
b/plugins/inputs/win_perf_counters/win_perf_counters_notwindows.go index 427f5d5461ff3..00af92b722552 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters_notwindows.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package win_perf_counters diff --git a/plugins/inputs/win_perf_counters/win_perf_counters_test.go b/plugins/inputs/win_perf_counters/win_perf_counters_test.go index 930e923754ac8..969b518d0f2b0 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters_test.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters_test.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package win_perf_counters diff --git a/plugins/inputs/win_services/win_services.go b/plugins/inputs/win_services/win_services.go index 185e9b6b67de4..38f873a99284d 100644 --- a/plugins/inputs/win_services/win_services.go +++ b/plugins/inputs/win_services/win_services.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package win_services diff --git a/plugins/inputs/win_services/win_services_integration_test.go b/plugins/inputs/win_services/win_services_integration_test.go index 998aa1ed5eb2f..3c831642a01cf 100644 --- a/plugins/inputs/win_services/win_services_integration_test.go +++ b/plugins/inputs/win_services/win_services_integration_test.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows //these tests must be run under administrator account diff --git a/plugins/inputs/win_services/win_services_notwindows.go b/plugins/inputs/win_services/win_services_notwindows.go index 062c11cfc8eed..aa2f3534ca74d 100644 --- a/plugins/inputs/win_services/win_services_notwindows.go +++ b/plugins/inputs/win_services/win_services_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package win_services diff --git a/plugins/inputs/win_services/win_services_test.go b/plugins/inputs/win_services/win_services_test.go index 7d1672e8f6515..69a75372dd086 100644 --- 
a/plugins/inputs/win_services/win_services_test.go +++ b/plugins/inputs/win_services/win_services_test.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package win_services diff --git a/plugins/inputs/wireless/wireless_linux.go b/plugins/inputs/wireless/wireless_linux.go index 75890a7901074..706f9700d12c9 100644 --- a/plugins/inputs/wireless/wireless_linux.go +++ b/plugins/inputs/wireless/wireless_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package wireless diff --git a/plugins/inputs/wireless/wireless_notlinux.go b/plugins/inputs/wireless/wireless_notlinux.go index 4769acc970e42..435559ca58529 100644 --- a/plugins/inputs/wireless/wireless_notlinux.go +++ b/plugins/inputs/wireless/wireless_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package wireless diff --git a/plugins/inputs/wireless/wireless_test.go b/plugins/inputs/wireless/wireless_test.go index 6c562887e54db..20c10de88a347 100644 --- a/plugins/inputs/wireless/wireless_test.go +++ b/plugins/inputs/wireless/wireless_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package wireless diff --git a/plugins/inputs/zfs/zfs_freebsd.go b/plugins/inputs/zfs/zfs_freebsd.go index 491388147d93c..24f6a50997612 100644 --- a/plugins/inputs/zfs/zfs_freebsd.go +++ b/plugins/inputs/zfs/zfs_freebsd.go @@ -1,3 +1,4 @@ +//go:build freebsd // +build freebsd package zfs diff --git a/plugins/inputs/zfs/zfs_freebsd_test.go b/plugins/inputs/zfs/zfs_freebsd_test.go index 4d1fea0ae483a..816f82b6dbf5b 100644 --- a/plugins/inputs/zfs/zfs_freebsd_test.go +++ b/plugins/inputs/zfs/zfs_freebsd_test.go @@ -1,3 +1,4 @@ +//go:build freebsd // +build freebsd package zfs diff --git a/plugins/inputs/zfs/zfs_linux.go b/plugins/inputs/zfs/zfs_linux.go index 276880d7dea97..ac3ca6ee81d23 100644 --- a/plugins/inputs/zfs/zfs_linux.go +++ b/plugins/inputs/zfs/zfs_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package zfs diff --git a/plugins/inputs/zfs/zfs_linux_test.go 
b/plugins/inputs/zfs/zfs_linux_test.go index 7d8aff81c689c..52622582029a5 100644 --- a/plugins/inputs/zfs/zfs_linux_test.go +++ b/plugins/inputs/zfs/zfs_linux_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package zfs diff --git a/plugins/inputs/zfs/zfs_other.go b/plugins/inputs/zfs/zfs_other.go index 98de02be917dd..963afd3580ff8 100644 --- a/plugins/inputs/zfs/zfs_other.go +++ b/plugins/inputs/zfs/zfs_other.go @@ -1,3 +1,4 @@ +//go:build !linux && !freebsd // +build !linux,!freebsd package zfs diff --git a/plugins/outputs/sql/sqlite.go b/plugins/outputs/sql/sqlite.go index 3703f42923ac1..15666101a957d 100644 --- a/plugins/outputs/sql/sqlite.go +++ b/plugins/outputs/sql/sqlite.go @@ -1,4 +1,7 @@ -// +build linux,freebsd,darwin +//go:build linux && freebsd && darwin && (!mips || !mips64) +// +build linux +// +build freebsd +// +build darwin // +build !mips !mips64 package sql diff --git a/plugins/outputs/sql/sqlite_test.go b/plugins/outputs/sql/sqlite_test.go index 6ed08a2570662..d54ffe877a80f 100644 --- a/plugins/outputs/sql/sqlite_test.go +++ b/plugins/outputs/sql/sqlite_test.go @@ -1,4 +1,6 @@ -// +build linux,freebsd +//go:build linux && freebsd && (!mips || !mips64) +// +build linux +// +build freebsd // +build !mips !mips64 package sql diff --git a/plugins/parsers/influx/machine.go b/plugins/parsers/influx/machine.go index 5d715af1c4aaf..2649cdb42bc37 100644 --- a/plugins/parsers/influx/machine.go +++ b/plugins/parsers/influx/machine.go @@ -1,4 +1,3 @@ - //line plugins/parsers/influx/machine.go.rl:1 package influx @@ -16,19 +15,16 @@ func (e *readErr) Error() string { } var ( - ErrNameParse = errors.New("expected measurement name") - ErrFieldParse = errors.New("expected field") - ErrTagParse = errors.New("expected tag") + ErrNameParse = errors.New("expected measurement name") + ErrFieldParse = errors.New("expected field") + ErrTagParse = errors.New("expected tag") ErrTimestampParse = errors.New("expected timestamp") - ErrParse = errors.New("parse 
error") - EOF = errors.New("EOF") + ErrParse = errors.New("parse error") + EOF = errors.New("EOF") ) - //line plugins/parsers/influx/machine.go.rl:318 - - //line plugins/parsers/influx/machine.go:33 const LineProtocol_start int = 46 const LineProtocol_first_final int = 46 @@ -39,7 +35,6 @@ const LineProtocol_en_discard_line int = 34 const LineProtocol_en_align int = 85 const LineProtocol_en_series int = 37 - //line plugins/parsers/influx/machine.go.rl:321 type Handler interface { @@ -69,26 +64,25 @@ type machine struct { func NewMachine(handler Handler) *machine { m := &machine{ - handler: handler, + handler: handler, initState: LineProtocol_en_align, } - //line plugins/parsers/influx/machine.go.rl:354 - + //line plugins/parsers/influx/machine.go.rl:355 - + //line plugins/parsers/influx/machine.go.rl:356 - + //line plugins/parsers/influx/machine.go.rl:357 - + //line plugins/parsers/influx/machine.go.rl:358 - + //line plugins/parsers/influx/machine.go.rl:359 - + //line plugins/parsers/influx/machine.go:90 { - ( m.cs) = LineProtocol_start + (m.cs) = LineProtocol_start } //line plugins/parsers/influx/machine.go.rl:360 @@ -98,24 +92,23 @@ func NewMachine(handler Handler) *machine { func NewSeriesMachine(handler Handler) *machine { m := &machine{ - handler: handler, + handler: handler, initState: LineProtocol_en_series, } - //line plugins/parsers/influx/machine.go.rl:371 - + //line plugins/parsers/influx/machine.go.rl:372 - + //line plugins/parsers/influx/machine.go.rl:373 - + //line plugins/parsers/influx/machine.go.rl:374 - + //line plugins/parsers/influx/machine.go.rl:375 - + //line plugins/parsers/influx/machine.go:117 { - ( m.cs) = LineProtocol_start + (m.cs) = LineProtocol_start } //line plugins/parsers/influx/machine.go.rl:376 @@ -135,10 +128,9 @@ func (m *machine) SetData(data []byte) { m.beginMetric = false m.finishMetric = false - //line plugins/parsers/influx/machine.go:140 { - ( m.cs) = LineProtocol_start + (m.cs) = LineProtocol_start } //line 
plugins/parsers/influx/machine.go.rl:393 @@ -163,391 +155,391 @@ func (m *machine) Next() error { func (m *machine) exec() error { var err error - + //line plugins/parsers/influx/machine.go:168 { - if ( m.p) == ( m.pe) { - goto _test_eof - } - goto _resume + if (m.p) == (m.pe) { + goto _test_eof + } + goto _resume -_again: - switch ( m.cs) { - case 46: - goto st46 - case 1: - goto st1 - case 2: - goto st2 - case 3: - goto st3 - case 0: - goto st0 - case 4: - goto st4 - case 5: - goto st5 - case 6: - goto st6 - case 47: - goto st47 - case 48: - goto st48 - case 49: - goto st49 - case 7: - goto st7 - case 8: - goto st8 - case 9: - goto st9 - case 10: - goto st10 - case 50: - goto st50 - case 51: - goto st51 - case 52: - goto st52 - case 53: - goto st53 - case 54: - goto st54 - case 55: - goto st55 - case 56: - goto st56 - case 57: - goto st57 - case 58: - goto st58 - case 59: - goto st59 - case 60: - goto st60 - case 61: - goto st61 - case 62: - goto st62 - case 63: - goto st63 - case 64: - goto st64 - case 65: - goto st65 - case 66: - goto st66 - case 67: - goto st67 - case 68: - goto st68 - case 69: - goto st69 - case 11: - goto st11 - case 12: - goto st12 - case 13: - goto st13 - case 14: - goto st14 - case 15: - goto st15 - case 70: - goto st70 - case 16: - goto st16 - case 17: - goto st17 - case 71: - goto st71 - case 72: - goto st72 - case 73: - goto st73 - case 74: - goto st74 - case 75: - goto st75 - case 76: - goto st76 - case 77: - goto st77 - case 78: - goto st78 - case 79: - goto st79 - case 18: - goto st18 - case 19: - goto st19 - case 20: - goto st20 - case 80: - goto st80 - case 21: - goto st21 - case 22: - goto st22 - case 23: - goto st23 - case 81: - goto st81 - case 24: - goto st24 - case 25: - goto st25 - case 82: - goto st82 - case 83: - goto st83 - case 26: - goto st26 - case 27: - goto st27 - case 28: - goto st28 - case 29: - goto st29 - case 30: - goto st30 - case 31: - goto st31 - case 32: - goto st32 - case 33: - goto st33 - case 34: - goto 
st34 - case 84: - goto st84 - case 37: - goto st37 - case 86: - goto st86 - case 87: - goto st87 - case 38: - goto st38 - case 39: - goto st39 - case 40: - goto st40 - case 41: - goto st41 - case 88: - goto st88 - case 42: - goto st42 - case 89: - goto st89 - case 43: - goto st43 - case 44: - goto st44 - case 45: - goto st45 - case 85: - goto st85 - case 35: - goto st35 - case 36: - goto st36 - } + _again: + switch m.cs { + case 46: + goto st46 + case 1: + goto st1 + case 2: + goto st2 + case 3: + goto st3 + case 0: + goto st0 + case 4: + goto st4 + case 5: + goto st5 + case 6: + goto st6 + case 47: + goto st47 + case 48: + goto st48 + case 49: + goto st49 + case 7: + goto st7 + case 8: + goto st8 + case 9: + goto st9 + case 10: + goto st10 + case 50: + goto st50 + case 51: + goto st51 + case 52: + goto st52 + case 53: + goto st53 + case 54: + goto st54 + case 55: + goto st55 + case 56: + goto st56 + case 57: + goto st57 + case 58: + goto st58 + case 59: + goto st59 + case 60: + goto st60 + case 61: + goto st61 + case 62: + goto st62 + case 63: + goto st63 + case 64: + goto st64 + case 65: + goto st65 + case 66: + goto st66 + case 67: + goto st67 + case 68: + goto st68 + case 69: + goto st69 + case 11: + goto st11 + case 12: + goto st12 + case 13: + goto st13 + case 14: + goto st14 + case 15: + goto st15 + case 70: + goto st70 + case 16: + goto st16 + case 17: + goto st17 + case 71: + goto st71 + case 72: + goto st72 + case 73: + goto st73 + case 74: + goto st74 + case 75: + goto st75 + case 76: + goto st76 + case 77: + goto st77 + case 78: + goto st78 + case 79: + goto st79 + case 18: + goto st18 + case 19: + goto st19 + case 20: + goto st20 + case 80: + goto st80 + case 21: + goto st21 + case 22: + goto st22 + case 23: + goto st23 + case 81: + goto st81 + case 24: + goto st24 + case 25: + goto st25 + case 82: + goto st82 + case 83: + goto st83 + case 26: + goto st26 + case 27: + goto st27 + case 28: + goto st28 + case 29: + goto st29 + case 30: + goto st30 + case 
31: + goto st31 + case 32: + goto st32 + case 33: + goto st33 + case 34: + goto st34 + case 84: + goto st84 + case 37: + goto st37 + case 86: + goto st86 + case 87: + goto st87 + case 38: + goto st38 + case 39: + goto st39 + case 40: + goto st40 + case 41: + goto st41 + case 88: + goto st88 + case 42: + goto st42 + case 89: + goto st89 + case 43: + goto st43 + case 44: + goto st44 + case 45: + goto st45 + case 85: + goto st85 + case 35: + goto st35 + case 36: + goto st36 + } - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof - } -_resume: - switch ( m.cs) { - case 46: - goto st_case_46 - case 1: - goto st_case_1 - case 2: - goto st_case_2 - case 3: - goto st_case_3 - case 0: - goto st_case_0 - case 4: - goto st_case_4 - case 5: - goto st_case_5 - case 6: - goto st_case_6 - case 47: - goto st_case_47 - case 48: - goto st_case_48 - case 49: - goto st_case_49 - case 7: - goto st_case_7 - case 8: - goto st_case_8 - case 9: - goto st_case_9 - case 10: - goto st_case_10 - case 50: - goto st_case_50 - case 51: - goto st_case_51 - case 52: - goto st_case_52 - case 53: - goto st_case_53 - case 54: - goto st_case_54 - case 55: - goto st_case_55 - case 56: - goto st_case_56 - case 57: - goto st_case_57 - case 58: - goto st_case_58 - case 59: - goto st_case_59 - case 60: - goto st_case_60 - case 61: - goto st_case_61 - case 62: - goto st_case_62 - case 63: - goto st_case_63 - case 64: - goto st_case_64 - case 65: - goto st_case_65 - case 66: - goto st_case_66 - case 67: - goto st_case_67 - case 68: - goto st_case_68 - case 69: - goto st_case_69 - case 11: - goto st_case_11 - case 12: - goto st_case_12 - case 13: - goto st_case_13 - case 14: - goto st_case_14 - case 15: - goto st_case_15 - case 70: - goto st_case_70 - case 16: - goto st_case_16 - case 17: - goto st_case_17 - case 71: - goto st_case_71 - case 72: - goto st_case_72 - case 73: - goto st_case_73 - case 74: - goto st_case_74 - case 75: - goto st_case_75 - case 76: - goto st_case_76 - case 77: - goto st_case_77 - 
case 78: - goto st_case_78 - case 79: - goto st_case_79 - case 18: - goto st_case_18 - case 19: - goto st_case_19 - case 20: - goto st_case_20 - case 80: - goto st_case_80 - case 21: - goto st_case_21 - case 22: - goto st_case_22 - case 23: - goto st_case_23 - case 81: - goto st_case_81 - case 24: - goto st_case_24 - case 25: - goto st_case_25 - case 82: - goto st_case_82 - case 83: - goto st_case_83 - case 26: - goto st_case_26 - case 27: - goto st_case_27 - case 28: - goto st_case_28 - case 29: - goto st_case_29 - case 30: - goto st_case_30 - case 31: - goto st_case_31 - case 32: - goto st_case_32 - case 33: - goto st_case_33 - case 34: - goto st_case_34 - case 84: - goto st_case_84 - case 37: - goto st_case_37 - case 86: - goto st_case_86 - case 87: - goto st_case_87 - case 38: - goto st_case_38 - case 39: - goto st_case_39 - case 40: - goto st_case_40 - case 41: - goto st_case_41 - case 88: - goto st_case_88 - case 42: - goto st_case_42 - case 89: - goto st_case_89 - case 43: - goto st_case_43 - case 44: - goto st_case_44 - case 45: - goto st_case_45 - case 85: - goto st_case_85 - case 35: - goto st_case_35 - case 36: - goto st_case_36 - } - goto st_out + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof + } + _resume: + switch m.cs { + case 46: + goto st_case_46 + case 1: + goto st_case_1 + case 2: + goto st_case_2 + case 3: + goto st_case_3 + case 0: + goto st_case_0 + case 4: + goto st_case_4 + case 5: + goto st_case_5 + case 6: + goto st_case_6 + case 47: + goto st_case_47 + case 48: + goto st_case_48 + case 49: + goto st_case_49 + case 7: + goto st_case_7 + case 8: + goto st_case_8 + case 9: + goto st_case_9 + case 10: + goto st_case_10 + case 50: + goto st_case_50 + case 51: + goto st_case_51 + case 52: + goto st_case_52 + case 53: + goto st_case_53 + case 54: + goto st_case_54 + case 55: + goto st_case_55 + case 56: + goto st_case_56 + case 57: + goto st_case_57 + case 58: + goto st_case_58 + case 59: + goto st_case_59 + case 60: + goto st_case_60 + case 
61: + goto st_case_61 + case 62: + goto st_case_62 + case 63: + goto st_case_63 + case 64: + goto st_case_64 + case 65: + goto st_case_65 + case 66: + goto st_case_66 + case 67: + goto st_case_67 + case 68: + goto st_case_68 + case 69: + goto st_case_69 + case 11: + goto st_case_11 + case 12: + goto st_case_12 + case 13: + goto st_case_13 + case 14: + goto st_case_14 + case 15: + goto st_case_15 + case 70: + goto st_case_70 + case 16: + goto st_case_16 + case 17: + goto st_case_17 + case 71: + goto st_case_71 + case 72: + goto st_case_72 + case 73: + goto st_case_73 + case 74: + goto st_case_74 + case 75: + goto st_case_75 + case 76: + goto st_case_76 + case 77: + goto st_case_77 + case 78: + goto st_case_78 + case 79: + goto st_case_79 + case 18: + goto st_case_18 + case 19: + goto st_case_19 + case 20: + goto st_case_20 + case 80: + goto st_case_80 + case 21: + goto st_case_21 + case 22: + goto st_case_22 + case 23: + goto st_case_23 + case 81: + goto st_case_81 + case 24: + goto st_case_24 + case 25: + goto st_case_25 + case 82: + goto st_case_82 + case 83: + goto st_case_83 + case 26: + goto st_case_26 + case 27: + goto st_case_27 + case 28: + goto st_case_28 + case 29: + goto st_case_29 + case 30: + goto st_case_30 + case 31: + goto st_case_31 + case 32: + goto st_case_32 + case 33: + goto st_case_33 + case 34: + goto st_case_34 + case 84: + goto st_case_84 + case 37: + goto st_case_37 + case 86: + goto st_case_86 + case 87: + goto st_case_87 + case 38: + goto st_case_38 + case 39: + goto st_case_39 + case 40: + goto st_case_40 + case 41: + goto st_case_41 + case 88: + goto st_case_88 + case 42: + goto st_case_42 + case 89: + goto st_case_89 + case 43: + goto st_case_43 + case 44: + goto st_case_44 + case 45: + goto st_case_45 + case 85: + goto st_case_85 + case 35: + goto st_case_35 + case 36: + goto st_case_36 + } + goto st_out st46: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof46 } st_case_46: - switch ( m.data)[( m.p)] { 
+ switch (m.data)[(m.p)] { case 10: goto tr31 case 13: @@ -561,33 +553,33 @@ _resume: case 92: goto tr81 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr80 } goto tr79 -tr29: + tr29: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st1 -tr79: + goto st1 + tr79: //line plugins/parsers/influx/machine.go.rl:82 - m.beginMetric = true + m.beginMetric = true //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st1 + goto st1 st1: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof1 } st_case_1: //line plugins/parsers/influx/machine.go:590 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr2 case 13: @@ -599,43 +591,49 @@ tr79: case 92: goto st8 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr1 } goto st1 -tr1: - ( m.cs) = 2 + tr1: + (m.cs) = 2 //line plugins/parsers/influx/machine.go.rl:86 - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetMeasurement(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr56: - ( m.cs) = 2 + goto _again + tr56: + (m.cs) = 2 //line plugins/parsers/influx/machine.go.rl:99 - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again + goto _again st2: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof2 } st_case_2: //line plugins/parsers/influx/machine.go:638 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr7 case 13: @@ -649,23 +647,23 @@ tr56: case 92: goto tr8 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 
9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto st2 } goto tr5 -tr5: + tr5: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st3 + goto st3 st3: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof3 } st_case_3: //line plugins/parsers/influx/machine.go:668 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 32: goto tr7 case 44: @@ -675,99 +673,119 @@ tr5: case 92: goto st12 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr7 } goto st3 -tr2: - ( m.cs) = 0 + tr2: + (m.cs) = 0 //line plugins/parsers/influx/machine.go.rl:46 - err = ErrTagParse - ( m.p)-- + err = ErrTagParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } + (m.cs) = 34 + { + (m.p)++ + goto _out + } - goto _again -tr7: - ( m.cs) = 0 + goto _again + tr7: + (m.cs) = 0 //line plugins/parsers/influx/machine.go.rl:39 - err = ErrFieldParse - ( m.p)-- + err = ErrFieldParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } + (m.cs) = 34 + { + (m.p)++ + goto _out + } - goto _again -tr31: - ( m.cs) = 0 + goto _again + tr31: + (m.cs) = 0 //line plugins/parsers/influx/machine.go.rl:32 - err = ErrNameParse - ( m.p)-- + err = ErrNameParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } + (m.cs) = 34 + { + (m.p)++ + goto _out + } - goto _again -tr35: - ( m.cs) = 0 + goto _again + tr35: + (m.cs) = 0 //line plugins/parsers/influx/machine.go.rl:53 - err = ErrTimestampParse - ( m.p)-- + err = ErrTimestampParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } + (m.cs) = 34 + { + (m.p)++ + goto _out + } - goto _again -tr82: - ( m.cs) = 0 + goto _again + tr82: + (m.cs) = 0 //line plugins/parsers/influx/machine.go.rl:39 - err = ErrFieldParse - ( m.p)-- + err = ErrFieldParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } + (m.cs) = 34 + { + (m.p)++ + goto _out + } //line plugins/parsers/influx/machine.go.rl:53 - err = ErrTimestampParse - ( m.p)-- + err = ErrTimestampParse 
+ (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } + (m.cs) = 34 + { + (m.p)++ + goto _out + } - goto _again -tr135: + goto _again + tr135: //line plugins/parsers/influx/machine.go.rl:73 - ( m.p)-- + (m.p)-- - {goto st46 } + { + goto st46 + } - goto st0 + goto st0 //line plugins/parsers/influx/machine.go:754 -st_case_0: + st_case_0: st0: - ( m.cs) = 0 + (m.cs) = 0 goto _out -tr10: + tr10: //line plugins/parsers/influx/machine.go.rl:108 - m.key = m.text() + m.key = m.text() - goto st4 + goto st4 st4: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof4 } st_case_4: //line plugins/parsers/influx/machine.go:770 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 34: goto st5 case 45: @@ -785,16 +803,16 @@ tr10: case 116: goto tr20 } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + if 49 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr16 } goto tr7 st5: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof5 } st_case_5: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr22 case 34: @@ -803,39 +821,39 @@ tr10: goto tr24 } goto tr21 -tr21: + tr21: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st6 -tr22: + goto st6 + tr22: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto st6 -tr26: + goto st6 + tr26: //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto st6 + goto st6 st6: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof6 } st_case_6: //line plugins/parsers/influx/machine.go:838 - 
switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr26 case 34: @@ -844,43 +862,49 @@ tr26: goto st13 } goto st6 -tr23: - ( m.cs) = 47 + tr23: + (m.cs) = 47 //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p //line plugins/parsers/influx/machine.go.rl:148 - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddString(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr27: - ( m.cs) = 47 + goto _again + tr27: + (m.cs) = 47 //line plugins/parsers/influx/machine.go.rl:148 - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddString(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again + goto _again st47: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof47 } st_case_47: //line plugins/parsers/influx/machine.go:883 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr34 case 13: @@ -890,69 +914,81 @@ tr27: case 44: goto st11 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto st48 } goto tr82 -tr110: - ( m.cs) = 48 + tr110: + (m.cs) = 48 //line plugins/parsers/influx/machine.go.rl:130 - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr117: - ( m.cs) = 48 + goto _again + tr117: + (m.cs) = 48 //line plugins/parsers/influx/machine.go.rl:112 - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + 
(m.p)++ + goto _out + } + } - goto _again -tr122: - ( m.cs) = 48 + goto _again + tr122: + (m.cs) = 48 //line plugins/parsers/influx/machine.go.rl:121 - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr127: - ( m.cs) = 48 + goto _again + tr127: + (m.cs) = 48 //line plugins/parsers/influx/machine.go.rl:139 - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again + goto _again st48: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof48 } st_case_48: //line plugins/parsers/influx/machine.go:955 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr34 case 13: @@ -963,130 +999,148 @@ tr127: goto tr86 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr87 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto st48 } goto tr35 -tr34: + tr34: //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto st49 -tr89: - ( m.cs) = 49 + goto st49 + tr89: + (m.cs) = 49 //line plugins/parsers/influx/machine.go.rl:157 - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetTimestamp(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // 
next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto _again -tr111: - ( m.cs) = 49 + goto _again + tr111: + (m.cs) = 49 //line plugins/parsers/influx/machine.go.rl:130 - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto _again -tr118: - ( m.cs) = 49 + goto _again + tr118: + (m.cs) = 49 //line plugins/parsers/influx/machine.go.rl:112 - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto _again -tr123: - ( m.cs) = 49 + goto _again + tr123: + (m.cs) = 49 //line plugins/parsers/influx/machine.go.rl:121 - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto _again -tr128: - ( m.cs) = 49 + goto _again + tr128: + (m.cs) = 49 //line plugins/parsers/influx/machine.go.rl:139 - 
err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto _again + goto _again st49: //line plugins/parsers/influx/machine.go.rl:172 - m.finishMetric = true - ( m.cs) = 85; - {( m.p)++; goto _out } + m.finishMetric = true + (m.cs) = 85 + { + (m.p)++ + goto _out + } - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof49 } st_case_49: //line plugins/parsers/influx/machine.go:1089 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr31 case 13: @@ -1100,23 +1154,23 @@ tr128: case 92: goto tr32 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto st7 } goto tr29 -tr80: + tr80: //line plugins/parsers/influx/machine.go.rl:82 - m.beginMetric = true + m.beginMetric = true - goto st7 + goto st7 st7: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof7 } st_case_7: //line plugins/parsers/influx/machine.go:1119 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr31 case 13: @@ -1130,140 +1184,155 @@ tr80: case 92: goto tr32 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto st7 } goto tr29 -tr32: + tr32: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st8 -tr81: + goto st8 + tr81: //line plugins/parsers/influx/machine.go.rl:82 - m.beginMetric = true + m.beginMetric = true //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st8 + goto st8 st8: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) 
== (m.pe) { goto _test_eof8 } st_case_8: //line plugins/parsers/influx/machine.go:1159 - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto st0 } goto st1 -tr90: - ( m.cs) = 9 + tr90: + (m.cs) = 9 //line plugins/parsers/influx/machine.go.rl:157 - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetTimestamp(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr112: - ( m.cs) = 9 + goto _again + tr112: + (m.cs) = 9 //line plugins/parsers/influx/machine.go.rl:130 - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr119: - ( m.cs) = 9 + goto _again + tr119: + (m.cs) = 9 //line plugins/parsers/influx/machine.go.rl:112 - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr124: - ( m.cs) = 9 + goto _again + tr124: + (m.cs) = 9 //line plugins/parsers/influx/machine.go.rl:121 - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr129: - ( m.cs) = 9 + goto _again + tr129: + (m.cs) = 9 //line plugins/parsers/influx/machine.go.rl:139 - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again + goto _again st9: - 
if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof9 } st_case_9: //line plugins/parsers/influx/machine.go:1234 - if ( m.data)[( m.p)] == 10 { + if (m.data)[(m.p)] == 10 { goto tr34 } goto st0 -tr86: + tr86: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st10 + goto st10 st10: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof10 } st_case_10: //line plugins/parsers/influx/machine.go:1250 - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st50 } goto tr35 -tr87: + tr87: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st50 + goto st50 st50: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof50 } st_case_50: //line plugins/parsers/influx/machine.go:1266 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1272,34 +1341,37 @@ tr87: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st52 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 -tr88: - ( m.cs) = 51 + tr88: + (m.cs) = 51 //line plugins/parsers/influx/machine.go.rl:157 - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetTimestamp(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again + goto _again st51: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof51 } st_case_51: //line plugins/parsers/influx/machine.go:1302 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr34 case 13: @@ -1307,16 +1379,16 @@ tr88: case 32: goto st51 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] 
&& (m.data)[(m.p)] <= 12 { goto st51 } goto st0 st52: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof52 } st_case_52: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1325,20 +1397,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st53 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st53: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof53 } st_case_53: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1347,20 +1419,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st54 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st54: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof54 } st_case_54: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1369,20 +1441,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st55 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st55: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof55 } st_case_55: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1391,20 +1463,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st56 } - case ( 
m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st56: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof56 } st_case_56: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1413,20 +1485,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st57 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st57: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof57 } st_case_57: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1435,20 +1507,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st58 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st58: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof58 } st_case_58: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1457,20 +1529,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st59 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st59: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof59 } st_case_59: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1479,20 +1551,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { 
goto st60 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st60: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof60 } st_case_60: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1501,20 +1573,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st61 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st61: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof61 } st_case_61: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1523,20 +1595,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st62 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st62: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof62 } st_case_62: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1545,20 +1617,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st63 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st63: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof63 } st_case_63: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1567,20 +1639,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && 
(m.data)[(m.p)] <= 57 { goto st64 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st64: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof64 } st_case_64: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1589,20 +1661,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st65 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st65: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof65 } st_case_65: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1611,20 +1683,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st66 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st66: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof66 } st_case_66: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1633,20 +1705,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st67 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st67: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof67 } st_case_67: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1655,20 +1727,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 
<= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st68 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st68: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof68 } st_case_68: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1677,20 +1749,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st69 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st69: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof69 } st_case_69: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1698,69 +1770,81 @@ tr88: case 32: goto tr88 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr88 } goto tr35 -tr113: - ( m.cs) = 11 + tr113: + (m.cs) = 11 //line plugins/parsers/influx/machine.go.rl:130 - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr120: - ( m.cs) = 11 + goto _again + tr120: + (m.cs) = 11 //line plugins/parsers/influx/machine.go.rl:112 - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr125: - ( m.cs) = 11 + goto _again + tr125: + (m.cs) = 11 //line plugins/parsers/influx/machine.go.rl:121 - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; 
goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr130: - ( m.cs) = 11 + goto _again + tr130: + (m.cs) = 11 //line plugins/parsers/influx/machine.go.rl:139 - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again + goto _again st11: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof11 } st_case_11: //line plugins/parsers/influx/machine.go:1763 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 32: goto tr7 case 44: @@ -1770,89 +1854,89 @@ tr130: case 92: goto tr8 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr7 } goto tr5 -tr8: + tr8: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st12 + goto st12 st12: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof12 } st_case_12: //line plugins/parsers/influx/machine.go:1789 - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr7 } goto st3 -tr24: + tr24: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st13 + goto st13 st13: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof13 } st_case_13: //line plugins/parsers/influx/machine.go:1805 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 34: goto st6 case 92: goto st6 } goto tr7 -tr13: + tr13: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st14 + goto st14 st14: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof14 } st_case_14: //line plugins/parsers/influx/machine.go:1824 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 46: goto st15 case 48: goto st72 } - if 49 <= ( m.data)[( 
m.p)] && ( m.data)[( m.p)] <= 57 { + if 49 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st75 } goto tr7 -tr14: + tr14: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st15 + goto st15 st15: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof15 } st_case_15: //line plugins/parsers/influx/machine.go:1846 - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st70 } goto tr7 st70: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof70 } st_case_70: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr111 case 13: @@ -1867,20 +1951,20 @@ tr14: goto st16 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st70 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr110 } goto tr82 st16: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof16 } st_case_16: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 34: goto st17 case 43: @@ -1888,25 +1972,25 @@ tr14: case 45: goto st17 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st71 } goto tr7 st17: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof17 } st_case_17: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st71 } goto tr7 st71: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof71 } st_case_71: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr111 case 13: @@ -1917,20 +2001,20 @@ tr14: goto tr113 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= 
(m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st71 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr110 } goto tr82 st72: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof72 } st_case_72: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr111 case 13: @@ -1949,20 +2033,20 @@ tr14: goto st74 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st73 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr110 } goto tr82 st73: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof73 } st_case_73: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr111 case 13: @@ -1979,20 +2063,20 @@ tr14: goto st16 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st73 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr110 } goto tr82 st74: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof74 } st_case_74: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr118 case 13: @@ -2002,16 +2086,16 @@ tr14: case 44: goto tr120 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr117 } goto tr82 st75: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof75 } st_case_75: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr111 case 13: @@ -2030,27 +2114,27 @@ tr14: goto st74 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st75 } - case ( m.data)[( m.p)] >= 9: + case 
(m.data)[(m.p)] >= 9: goto tr110 } goto tr82 -tr15: + tr15: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st76 + goto st76 st76: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof76 } st_case_76: //line plugins/parsers/influx/machine.go:2053 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr111 case 13: @@ -2071,20 +2155,20 @@ tr15: goto st77 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st73 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr110 } goto tr82 st77: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof77 } st_case_77: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr123 case 13: @@ -2094,23 +2178,23 @@ tr15: case 44: goto tr125 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr122 } goto tr82 -tr16: + tr16: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st78 + goto st78 st78: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof78 } st_case_78: //line plugins/parsers/influx/machine.go:2113 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr111 case 13: @@ -2131,27 +2215,27 @@ tr16: goto st77 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st78 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr110 } goto tr82 -tr17: + tr17: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st79 + goto st79 st79: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof79 } st_case_79: //line 
plugins/parsers/influx/machine.go:2154 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr128 case 13: @@ -2165,43 +2249,43 @@ tr17: case 97: goto st21 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr127 } goto tr82 st18: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof18 } st_case_18: - if ( m.data)[( m.p)] == 76 { + if (m.data)[(m.p)] == 76 { goto st19 } goto tr7 st19: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof19 } st_case_19: - if ( m.data)[( m.p)] == 83 { + if (m.data)[(m.p)] == 83 { goto st20 } goto tr7 st20: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof20 } st_case_20: - if ( m.data)[( m.p)] == 69 { + if (m.data)[(m.p)] == 69 { goto st80 } goto tr7 st80: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof80 } st_case_80: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr128 case 13: @@ -2211,50 +2295,50 @@ tr17: case 44: goto tr130 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr127 } goto tr82 st21: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof21 } st_case_21: - if ( m.data)[( m.p)] == 108 { + if (m.data)[(m.p)] == 108 { goto st22 } goto tr7 st22: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof22 } st_case_22: - if ( m.data)[( m.p)] == 115 { + if (m.data)[(m.p)] == 115 { goto st23 } goto tr7 st23: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof23 } st_case_23: - if ( m.data)[( m.p)] == 101 { + if (m.data)[(m.p)] == 101 { goto st80 } goto tr7 -tr18: + tr18: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st81 + goto st81 st81: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof81 
} st_case_81: //line plugins/parsers/influx/machine.go:2257 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr128 case 13: @@ -2268,41 +2352,41 @@ tr18: case 114: goto st25 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr127 } goto tr82 st24: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof24 } st_case_24: - if ( m.data)[( m.p)] == 85 { + if (m.data)[(m.p)] == 85 { goto st20 } goto tr7 st25: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof25 } st_case_25: - if ( m.data)[( m.p)] == 117 { + if (m.data)[(m.p)] == 117 { goto st23 } goto tr7 -tr19: + tr19: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st82 + goto st82 st82: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof82 } st_case_82: //line plugins/parsers/influx/machine.go:2305 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr128 case 13: @@ -2314,23 +2398,23 @@ tr19: case 97: goto st21 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr127 } goto tr82 -tr20: + tr20: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st83 + goto st83 st83: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof83 } st_case_83: //line plugins/parsers/influx/machine.go:2333 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr128 case 13: @@ -2342,43 +2426,49 @@ tr20: case 114: goto st25 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr127 } goto tr82 -tr3: - ( m.cs) = 26 + tr3: + (m.cs) = 26 //line plugins/parsers/influx/machine.go.rl:86 - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetMeasurement(m.text()) + if err != nil { + (m.p)-- - 
( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr57: - ( m.cs) = 26 + goto _again + tr57: + (m.cs) = 26 //line plugins/parsers/influx/machine.go.rl:99 - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again + goto _again st26: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof26 } st_case_26: //line plugins/parsers/influx/machine.go:2381 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 32: goto tr2 case 44: @@ -2388,23 +2478,23 @@ tr57: case 92: goto tr49 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto tr48 -tr48: + tr48: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st27 + goto st27 st27: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof27 } st_case_27: //line plugins/parsers/influx/machine.go:2407 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 32: goto tr2 case 44: @@ -2414,23 +2504,23 @@ tr48: case 92: goto st32 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto st27 -tr51: + tr51: //line plugins/parsers/influx/machine.go.rl:95 - m.key = m.text() + m.key = m.text() - goto st28 + goto st28 st28: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof28 } st_case_28: //line plugins/parsers/influx/machine.go:2433 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 32: goto tr2 case 44: @@ -2440,23 +2530,23 @@ tr51: case 92: goto tr54 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto tr53 -tr53: + tr53: //line 
plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st29 + goto st29 st29: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof29 } st_case_29: //line plugins/parsers/influx/machine.go:2459 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr2 case 13: @@ -2470,39 +2560,39 @@ tr53: case 92: goto st30 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr56 } goto st29 -tr54: + tr54: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st30 + goto st30 st30: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof30 } st_case_30: //line plugins/parsers/influx/machine.go:2489 - if ( m.data)[( m.p)] == 92 { + if (m.data)[(m.p)] == 92 { goto st31 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto st29 st31: //line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { + (m.p)-- + + if (m.p)++; (m.p) == (m.pe) { goto _test_eof31 } st_case_31: //line plugins/parsers/influx/machine.go:2505 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr2 case 13: @@ -2516,39 +2606,39 @@ tr54: case 92: goto st30 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr56 } goto st29 -tr49: + tr49: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st32 + goto st32 st32: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof32 } st_case_32: //line plugins/parsers/influx/machine.go:2535 - if ( m.data)[( m.p)] == 92 { + if (m.data)[(m.p)] == 92 { goto st33 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto st27 st33: //line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( 
m.p)++; ( m.p) == ( m.pe) { + (m.p)-- + + if (m.p)++; (m.p) == (m.pe) { goto _test_eof33 } st_case_33: //line plugins/parsers/influx/machine.go:2551 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 32: goto tr2 case 44: @@ -2558,44 +2648,46 @@ tr49: case 92: goto st32 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto st27 st34: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof34 } st_case_34: - if ( m.data)[( m.p)] == 10 { + if (m.data)[(m.p)] == 10 { goto tr62 } goto st34 -tr62: + tr62: //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line //line plugins/parsers/influx/machine.go.rl:78 - {goto st85 } + { + goto st85 + } - goto st84 + goto st84 st84: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof84 } st_case_84: //line plugins/parsers/influx/machine.go:2592 goto st0 st37: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof37 } st_case_37: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 32: goto tr31 case 35: @@ -2605,27 +2697,27 @@ tr62: case 92: goto tr66 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr31 } goto tr65 -tr65: + tr65: //line plugins/parsers/influx/machine.go.rl:82 - m.beginMetric = true + m.beginMetric = true //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st86 + goto st86 st86: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof86 } st_case_86: //line plugins/parsers/influx/machine.go:2628 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr138 case 13: @@ -2637,138 +2729,159 @@ tr65: case 92: goto st45 } - if 9 <= ( m.data)[( m.p)] && ( 
m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr2 } goto st86 -tr67: + tr67: //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto st87 -tr138: - ( m.cs) = 87 + goto st87 + tr138: + (m.cs) = 87 //line plugins/parsers/influx/machine.go.rl:86 - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetMeasurement(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto _again -tr142: - ( m.cs) = 87 + goto _again + tr142: + (m.cs) = 87 //line plugins/parsers/influx/machine.go.rl:99 - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto _again + goto _again st87: //line plugins/parsers/influx/machine.go.rl:172 - m.finishMetric = true - ( m.cs) = 85; - {( m.p)++; goto _out } + m.finishMetric = true + (m.cs) = 85 + { + (m.p)++ + goto _out + } - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof87 } st_case_87: //line plugins/parsers/influx/machine.go:2702 goto st0 -tr139: - ( m.cs) = 38 + tr139: + (m.cs) = 38 //line plugins/parsers/influx/machine.go.rl:86 - err = m.handler.SetMeasurement(m.text()) - if err 
!= nil { - ( m.p)-- + err = m.handler.SetMeasurement(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr143: - ( m.cs) = 38 + goto _again + tr143: + (m.cs) = 38 //line plugins/parsers/influx/machine.go.rl:99 - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again + goto _again st38: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof38 } st_case_38: //line plugins/parsers/influx/machine.go:2735 - if ( m.data)[( m.p)] == 10 { + if (m.data)[(m.p)] == 10 { goto tr67 } goto st0 -tr140: - ( m.cs) = 39 + tr140: + (m.cs) = 39 //line plugins/parsers/influx/machine.go.rl:86 - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetMeasurement(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr144: - ( m.cs) = 39 + goto _again + tr144: + (m.cs) = 39 //line plugins/parsers/influx/machine.go.rl:99 - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again + goto _again st39: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof39 } st_case_39: //line plugins/parsers/influx/machine.go:2771 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 32: goto tr2 case 44: @@ -2778,23 +2891,23 @@ tr144: case 92: goto tr69 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto tr68 -tr68: + tr68: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = 
m.p + m.pb = m.p - goto st40 + goto st40 st40: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof40 } st_case_40: //line plugins/parsers/influx/machine.go:2797 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 32: goto tr2 case 44: @@ -2804,23 +2917,23 @@ tr68: case 92: goto st43 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto st40 -tr71: + tr71: //line plugins/parsers/influx/machine.go.rl:95 - m.key = m.text() + m.key = m.text() - goto st41 + goto st41 st41: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof41 } st_case_41: //line plugins/parsers/influx/machine.go:2823 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 32: goto tr2 case 44: @@ -2830,23 +2943,23 @@ tr71: case 92: goto tr74 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto tr73 -tr73: + tr73: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st88 + goto st88 st88: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof88 } st_case_88: //line plugins/parsers/influx/machine.go:2849 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr142 case 13: @@ -2860,39 +2973,39 @@ tr73: case 92: goto st42 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr2 } goto st88 -tr74: + tr74: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st42 + goto st42 st42: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof42 } st_case_42: //line plugins/parsers/influx/machine.go:2879 - if ( m.data)[( m.p)] == 92 { + if (m.data)[(m.p)] == 92 { goto st89 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto st88 st89: 
//line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { + (m.p)-- + + if (m.p)++; (m.p) == (m.pe) { goto _test_eof89 } st_case_89: //line plugins/parsers/influx/machine.go:2895 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr142 case 13: @@ -2906,39 +3019,39 @@ tr74: case 92: goto st42 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr2 } goto st88 -tr69: + tr69: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st43 + goto st43 st43: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof43 } st_case_43: //line plugins/parsers/influx/machine.go:2925 - if ( m.data)[( m.p)] == 92 { + if (m.data)[(m.p)] == 92 { goto st44 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto st40 st44: //line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { + (m.p)-- + + if (m.p)++; (m.p) == (m.pe) { goto _test_eof44 } st_case_44: //line plugins/parsers/influx/machine.go:2941 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 32: goto tr2 case 44: @@ -2948,45 +3061,45 @@ tr69: case 92: goto st43 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto st40 -tr66: + tr66: //line plugins/parsers/influx/machine.go.rl:82 - m.beginMetric = true + m.beginMetric = true //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st45 + goto st45 st45: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof45 } st_case_45: //line plugins/parsers/influx/machine.go:2971 - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto st0 } goto st86 -tr63: + tr63: //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ 
- m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto st85 + goto st85 st85: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof85 } st_case_85: //line plugins/parsers/influx/machine.go:2989 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr63 case 13: @@ -2996,312 +3109,554 @@ tr63: case 35: goto st36 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto st85 } goto tr135 st35: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof35 } st_case_35: - if ( m.data)[( m.p)] == 10 { + if (m.data)[(m.p)] == 10 { goto tr63 } goto st0 st36: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof36 } st_case_36: - if ( m.data)[( m.p)] == 10 { + if (m.data)[(m.p)] == 10 { goto tr63 } goto st36 st_out: - _test_eof46: ( m.cs) = 46; goto _test_eof - _test_eof1: ( m.cs) = 1; goto _test_eof - _test_eof2: ( m.cs) = 2; goto _test_eof - _test_eof3: ( m.cs) = 3; goto _test_eof - _test_eof4: ( m.cs) = 4; goto _test_eof - _test_eof5: ( m.cs) = 5; goto _test_eof - _test_eof6: ( m.cs) = 6; goto _test_eof - _test_eof47: ( m.cs) = 47; goto _test_eof - _test_eof48: ( m.cs) = 48; goto _test_eof - _test_eof49: ( m.cs) = 49; goto _test_eof - _test_eof7: ( m.cs) = 7; goto _test_eof - _test_eof8: ( m.cs) = 8; goto _test_eof - _test_eof9: ( m.cs) = 9; goto _test_eof - _test_eof10: ( m.cs) = 10; goto _test_eof - _test_eof50: ( m.cs) = 50; goto _test_eof - _test_eof51: ( m.cs) = 51; goto _test_eof - _test_eof52: ( m.cs) = 52; goto _test_eof - _test_eof53: ( m.cs) = 53; goto _test_eof - _test_eof54: ( m.cs) = 54; goto _test_eof - _test_eof55: ( m.cs) = 55; goto _test_eof - _test_eof56: ( m.cs) = 56; goto _test_eof - _test_eof57: ( m.cs) = 57; goto _test_eof - _test_eof58: ( m.cs) = 58; goto _test_eof - _test_eof59: ( 
m.cs) = 59; goto _test_eof - _test_eof60: ( m.cs) = 60; goto _test_eof - _test_eof61: ( m.cs) = 61; goto _test_eof - _test_eof62: ( m.cs) = 62; goto _test_eof - _test_eof63: ( m.cs) = 63; goto _test_eof - _test_eof64: ( m.cs) = 64; goto _test_eof - _test_eof65: ( m.cs) = 65; goto _test_eof - _test_eof66: ( m.cs) = 66; goto _test_eof - _test_eof67: ( m.cs) = 67; goto _test_eof - _test_eof68: ( m.cs) = 68; goto _test_eof - _test_eof69: ( m.cs) = 69; goto _test_eof - _test_eof11: ( m.cs) = 11; goto _test_eof - _test_eof12: ( m.cs) = 12; goto _test_eof - _test_eof13: ( m.cs) = 13; goto _test_eof - _test_eof14: ( m.cs) = 14; goto _test_eof - _test_eof15: ( m.cs) = 15; goto _test_eof - _test_eof70: ( m.cs) = 70; goto _test_eof - _test_eof16: ( m.cs) = 16; goto _test_eof - _test_eof17: ( m.cs) = 17; goto _test_eof - _test_eof71: ( m.cs) = 71; goto _test_eof - _test_eof72: ( m.cs) = 72; goto _test_eof - _test_eof73: ( m.cs) = 73; goto _test_eof - _test_eof74: ( m.cs) = 74; goto _test_eof - _test_eof75: ( m.cs) = 75; goto _test_eof - _test_eof76: ( m.cs) = 76; goto _test_eof - _test_eof77: ( m.cs) = 77; goto _test_eof - _test_eof78: ( m.cs) = 78; goto _test_eof - _test_eof79: ( m.cs) = 79; goto _test_eof - _test_eof18: ( m.cs) = 18; goto _test_eof - _test_eof19: ( m.cs) = 19; goto _test_eof - _test_eof20: ( m.cs) = 20; goto _test_eof - _test_eof80: ( m.cs) = 80; goto _test_eof - _test_eof21: ( m.cs) = 21; goto _test_eof - _test_eof22: ( m.cs) = 22; goto _test_eof - _test_eof23: ( m.cs) = 23; goto _test_eof - _test_eof81: ( m.cs) = 81; goto _test_eof - _test_eof24: ( m.cs) = 24; goto _test_eof - _test_eof25: ( m.cs) = 25; goto _test_eof - _test_eof82: ( m.cs) = 82; goto _test_eof - _test_eof83: ( m.cs) = 83; goto _test_eof - _test_eof26: ( m.cs) = 26; goto _test_eof - _test_eof27: ( m.cs) = 27; goto _test_eof - _test_eof28: ( m.cs) = 28; goto _test_eof - _test_eof29: ( m.cs) = 29; goto _test_eof - _test_eof30: ( m.cs) = 30; goto _test_eof - _test_eof31: ( m.cs) = 31; goto 
_test_eof - _test_eof32: ( m.cs) = 32; goto _test_eof - _test_eof33: ( m.cs) = 33; goto _test_eof - _test_eof34: ( m.cs) = 34; goto _test_eof - _test_eof84: ( m.cs) = 84; goto _test_eof - _test_eof37: ( m.cs) = 37; goto _test_eof - _test_eof86: ( m.cs) = 86; goto _test_eof - _test_eof87: ( m.cs) = 87; goto _test_eof - _test_eof38: ( m.cs) = 38; goto _test_eof - _test_eof39: ( m.cs) = 39; goto _test_eof - _test_eof40: ( m.cs) = 40; goto _test_eof - _test_eof41: ( m.cs) = 41; goto _test_eof - _test_eof88: ( m.cs) = 88; goto _test_eof - _test_eof42: ( m.cs) = 42; goto _test_eof - _test_eof89: ( m.cs) = 89; goto _test_eof - _test_eof43: ( m.cs) = 43; goto _test_eof - _test_eof44: ( m.cs) = 44; goto _test_eof - _test_eof45: ( m.cs) = 45; goto _test_eof - _test_eof85: ( m.cs) = 85; goto _test_eof - _test_eof35: ( m.cs) = 35; goto _test_eof - _test_eof36: ( m.cs) = 36; goto _test_eof - - _test_eof: {} - if ( m.p) == ( m.eof) { - switch ( m.cs) { - case 7, 37: + _test_eof46: + (m.cs) = 46 + goto _test_eof + _test_eof1: + (m.cs) = 1 + goto _test_eof + _test_eof2: + (m.cs) = 2 + goto _test_eof + _test_eof3: + (m.cs) = 3 + goto _test_eof + _test_eof4: + (m.cs) = 4 + goto _test_eof + _test_eof5: + (m.cs) = 5 + goto _test_eof + _test_eof6: + (m.cs) = 6 + goto _test_eof + _test_eof47: + (m.cs) = 47 + goto _test_eof + _test_eof48: + (m.cs) = 48 + goto _test_eof + _test_eof49: + (m.cs) = 49 + goto _test_eof + _test_eof7: + (m.cs) = 7 + goto _test_eof + _test_eof8: + (m.cs) = 8 + goto _test_eof + _test_eof9: + (m.cs) = 9 + goto _test_eof + _test_eof10: + (m.cs) = 10 + goto _test_eof + _test_eof50: + (m.cs) = 50 + goto _test_eof + _test_eof51: + (m.cs) = 51 + goto _test_eof + _test_eof52: + (m.cs) = 52 + goto _test_eof + _test_eof53: + (m.cs) = 53 + goto _test_eof + _test_eof54: + (m.cs) = 54 + goto _test_eof + _test_eof55: + (m.cs) = 55 + goto _test_eof + _test_eof56: + (m.cs) = 56 + goto _test_eof + _test_eof57: + (m.cs) = 57 + goto _test_eof + _test_eof58: + (m.cs) = 58 + goto 
_test_eof + _test_eof59: + (m.cs) = 59 + goto _test_eof + _test_eof60: + (m.cs) = 60 + goto _test_eof + _test_eof61: + (m.cs) = 61 + goto _test_eof + _test_eof62: + (m.cs) = 62 + goto _test_eof + _test_eof63: + (m.cs) = 63 + goto _test_eof + _test_eof64: + (m.cs) = 64 + goto _test_eof + _test_eof65: + (m.cs) = 65 + goto _test_eof + _test_eof66: + (m.cs) = 66 + goto _test_eof + _test_eof67: + (m.cs) = 67 + goto _test_eof + _test_eof68: + (m.cs) = 68 + goto _test_eof + _test_eof69: + (m.cs) = 69 + goto _test_eof + _test_eof11: + (m.cs) = 11 + goto _test_eof + _test_eof12: + (m.cs) = 12 + goto _test_eof + _test_eof13: + (m.cs) = 13 + goto _test_eof + _test_eof14: + (m.cs) = 14 + goto _test_eof + _test_eof15: + (m.cs) = 15 + goto _test_eof + _test_eof70: + (m.cs) = 70 + goto _test_eof + _test_eof16: + (m.cs) = 16 + goto _test_eof + _test_eof17: + (m.cs) = 17 + goto _test_eof + _test_eof71: + (m.cs) = 71 + goto _test_eof + _test_eof72: + (m.cs) = 72 + goto _test_eof + _test_eof73: + (m.cs) = 73 + goto _test_eof + _test_eof74: + (m.cs) = 74 + goto _test_eof + _test_eof75: + (m.cs) = 75 + goto _test_eof + _test_eof76: + (m.cs) = 76 + goto _test_eof + _test_eof77: + (m.cs) = 77 + goto _test_eof + _test_eof78: + (m.cs) = 78 + goto _test_eof + _test_eof79: + (m.cs) = 79 + goto _test_eof + _test_eof18: + (m.cs) = 18 + goto _test_eof + _test_eof19: + (m.cs) = 19 + goto _test_eof + _test_eof20: + (m.cs) = 20 + goto _test_eof + _test_eof80: + (m.cs) = 80 + goto _test_eof + _test_eof21: + (m.cs) = 21 + goto _test_eof + _test_eof22: + (m.cs) = 22 + goto _test_eof + _test_eof23: + (m.cs) = 23 + goto _test_eof + _test_eof81: + (m.cs) = 81 + goto _test_eof + _test_eof24: + (m.cs) = 24 + goto _test_eof + _test_eof25: + (m.cs) = 25 + goto _test_eof + _test_eof82: + (m.cs) = 82 + goto _test_eof + _test_eof83: + (m.cs) = 83 + goto _test_eof + _test_eof26: + (m.cs) = 26 + goto _test_eof + _test_eof27: + (m.cs) = 27 + goto _test_eof + _test_eof28: + (m.cs) = 28 + goto _test_eof + 
_test_eof29: + (m.cs) = 29 + goto _test_eof + _test_eof30: + (m.cs) = 30 + goto _test_eof + _test_eof31: + (m.cs) = 31 + goto _test_eof + _test_eof32: + (m.cs) = 32 + goto _test_eof + _test_eof33: + (m.cs) = 33 + goto _test_eof + _test_eof34: + (m.cs) = 34 + goto _test_eof + _test_eof84: + (m.cs) = 84 + goto _test_eof + _test_eof37: + (m.cs) = 37 + goto _test_eof + _test_eof86: + (m.cs) = 86 + goto _test_eof + _test_eof87: + (m.cs) = 87 + goto _test_eof + _test_eof38: + (m.cs) = 38 + goto _test_eof + _test_eof39: + (m.cs) = 39 + goto _test_eof + _test_eof40: + (m.cs) = 40 + goto _test_eof + _test_eof41: + (m.cs) = 41 + goto _test_eof + _test_eof88: + (m.cs) = 88 + goto _test_eof + _test_eof42: + (m.cs) = 42 + goto _test_eof + _test_eof89: + (m.cs) = 89 + goto _test_eof + _test_eof43: + (m.cs) = 43 + goto _test_eof + _test_eof44: + (m.cs) = 44 + goto _test_eof + _test_eof45: + (m.cs) = 45 + goto _test_eof + _test_eof85: + (m.cs) = 85 + goto _test_eof + _test_eof35: + (m.cs) = 35 + goto _test_eof + _test_eof36: + (m.cs) = 36 + goto _test_eof + + _test_eof: + { + } + if (m.p) == (m.eof) { + switch m.cs { + case 7, 37: //line plugins/parsers/influx/machine.go.rl:32 - err = ErrNameParse - ( m.p)-- + err = ErrNameParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } - case 2, 3, 4, 5, 6, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25: + case 2, 3, 4, 5, 6, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25: //line plugins/parsers/influx/machine.go.rl:39 - err = ErrFieldParse - ( m.p)-- + err = ErrFieldParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } - case 26, 27, 28, 30, 32, 33, 39, 40, 41, 42, 43, 44: + case 26, 27, 28, 30, 32, 33, 39, 40, 41, 42, 43, 44: //line plugins/parsers/influx/machine.go.rl:46 - err = ErrTagParse - ( m.p)-- + err = ErrTagParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 
0; goto _out } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } - case 10: + case 10: //line plugins/parsers/influx/machine.go.rl:53 - err = ErrTimestampParse - ( m.p)-- + err = ErrTimestampParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } - case 86: + case 86: //line plugins/parsers/influx/machine.go.rl:86 - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetMeasurement(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } - case 88, 89: + case 88, 89: //line plugins/parsers/influx/machine.go.rl:99 - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } - case 47, 48, 49, 51: + case 47, 48, 49, 51: //line plugins/parsers/influx/machine.go.rl:178 - m.finishMetric = true + m.finishMetric = true - case 46: + case 46: //line plugins/parsers/influx/machine.go.rl:82 - m.beginMetric = true + m.beginMetric = true //line plugins/parsers/influx/machine.go.rl:178 - m.finishMetric = true + m.finishMetric = true - case 1: + case 1: //line plugins/parsers/influx/machine.go.rl:86 - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetMeasurement(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:46 - err = ErrTagParse - ( m.p)-- + err = ErrTagParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } - case 29, 31: + case 29, 31: //line plugins/parsers/influx/machine.go.rl:99 - err = 
m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:46 - err = ErrTagParse - ( m.p)-- + err = ErrTagParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } - case 74: + case 74: //line plugins/parsers/influx/machine.go.rl:112 - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:178 - m.finishMetric = true + m.finishMetric = true - case 77: + case 77: //line plugins/parsers/influx/machine.go.rl:121 - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:178 - m.finishMetric = true + m.finishMetric = true - case 70, 71, 72, 73, 75, 76, 78: + case 70, 71, 72, 73, 75, 76, 78: //line plugins/parsers/influx/machine.go.rl:130 - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:178 - m.finishMetric = true + m.finishMetric = true - case 79, 80, 81, 82, 83: + case 79, 80, 81, 82, 83: //line plugins/parsers/influx/machine.go.rl:139 - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- + err = 
m.handler.AddBool(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:178 - m.finishMetric = true + m.finishMetric = true - case 50, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69: + case 50, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69: //line plugins/parsers/influx/machine.go.rl:157 - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetTimestamp(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:178 - m.finishMetric = true + m.finishMetric = true //line plugins/parsers/influx/machine.go:3301 + } } - } - _out: {} + _out: + { + } } //line plugins/parsers/influx/machine.go.rl:415 @@ -3364,7 +3719,7 @@ type streamMachine struct { func NewStreamMachine(r io.Reader, handler Handler) *streamMachine { m := &streamMachine{ machine: NewMachine(handler), - reader: r, + reader: r, } m.machine.SetData(make([]byte, 1024)) @@ -3394,7 +3749,7 @@ func (m *streamMachine) Next() error { for { // Expand the buffer if it is full if m.machine.pe == len(m.machine.data) { - expanded := make([]byte, 2 * len(m.machine.data)) + expanded := make([]byte, 2*len(m.machine.data)) copy(expanded, m.machine.data) m.machine.data = expanded } diff --git a/plugins/processors/filepath/filepath_test.go b/plugins/processors/filepath/filepath_test.go index a305c4c5c2f29..c6a3262921407 100644 --- a/plugins/processors/filepath/filepath_test.go +++ b/plugins/processors/filepath/filepath_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package filepath diff --git a/plugins/processors/port_name/services_path.go b/plugins/processors/port_name/services_path.go index c8cf73d14157c..3b9a4ce579c9a 100644 --- 
a/plugins/processors/port_name/services_path.go +++ b/plugins/processors/port_name/services_path.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package portname diff --git a/plugins/processors/port_name/services_path_notwindows.go b/plugins/processors/port_name/services_path_notwindows.go index 5097bfa9c6140..5fd30eb59671d 100644 --- a/plugins/processors/port_name/services_path_notwindows.go +++ b/plugins/processors/port_name/services_path_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package portname diff --git a/scripts/alpine.docker b/scripts/alpine.docker index d5b8b85f6abb7..8c2418083ef8c 100644 --- a/scripts/alpine.docker +++ b/scripts/alpine.docker @@ -1,4 +1,4 @@ -FROM golang:1.16.6 as builder +FROM golang:1.17.0 as builder WORKDIR /go/src/github.com/influxdata/telegraf COPY . /go/src/github.com/influxdata/telegraf diff --git a/scripts/buster.docker b/scripts/buster.docker index 685d30067e0ef..fbb18eee24f17 100644 --- a/scripts/buster.docker +++ b/scripts/buster.docker @@ -1,4 +1,4 @@ -FROM golang:1.16.6-buster as builder +FROM golang:1.17.0-buster as builder WORKDIR /go/src/github.com/influxdata/telegraf COPY . 
/go/src/github.com/influxdata/telegraf diff --git a/scripts/ci-1.16.docker b/scripts/ci-1.16.docker index f0b2badafd521..ab1683329e633 100644 --- a/scripts/ci-1.16.docker +++ b/scripts/ci-1.16.docker @@ -1,4 +1,4 @@ -FROM golang:1.16.6 +FROM golang:1.16.7 RUN chmod -R 755 "$GOPATH" diff --git a/scripts/ci-1.15.docker b/scripts/ci-1.17.docker similarity index 95% rename from scripts/ci-1.15.docker rename to scripts/ci-1.17.docker index 2b87f29be4e3e..574ab7be7a896 100644 --- a/scripts/ci-1.15.docker +++ b/scripts/ci-1.17.docker @@ -1,4 +1,4 @@ -FROM golang:1.15.8 +FROM golang:1.17.0 RUN chmod -R 755 "$GOPATH" diff --git a/scripts/mac_installgo.sh b/scripts/mac_installgo.sh index aab4731c22f30..cb41ee5f666cd 100644 --- a/scripts/mac_installgo.sh +++ b/scripts/mac_installgo.sh @@ -3,8 +3,8 @@ set -eux GO_ARCH="darwin-amd64" -GO_VERSION="1.16.6" -GO_VERSION_SHA="e4e83e7c6891baa00062ed37273ce95835f0be77ad8203a29ec56dbf3d87508a" # from https://golang.org/dl +GO_VERSION="1.17" +GO_VERSION_SHA="355bd544ce08d7d484d9d7de05a71b5c6f5bc10aa4b316688c2192aeb3dacfd1" # from https://golang.org/dl # This path is cachable. (Saving in /usr/local/ would cause issues restoring the cache.) 
path="/usr/local/Cellar" From 9ac5ae72d2e68f55ed6032dea937addad05a74d9 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Tue, 24 Aug 2021 13:18:08 -0700 Subject: [PATCH 019/176] docs: update links (#9632) --- README.md | 8 ++++++++ docs/DATA_FORMATS_INPUT.md | 3 ++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 763ed861e0533..2097ea5c37e2b 100644 --- a/README.md +++ b/README.md @@ -383,10 +383,14 @@ For documentation on the latest development code see the [documentation index][d - [Graphite](/plugins/parsers/graphite) - [Grok](/plugins/parsers/grok) - [JSON](/plugins/parsers/json) +- [JSON v2](/plugins/parsers/json_v2) - [Logfmt](/plugins/parsers/logfmt) - [Nagios](/plugins/parsers/nagios) +- [Prometheus](/plugins/parsers/prometheus) +- [Prometheus Remote Write](/plugins/parsers/prometheusremotewrite) - [Value](/plugins/parsers/value), ie: 45 or "booyah" - [Wavefront](/plugins/parsers/wavefront) +- [XPath](/plugins/parsers/xpath) (supports XML, JSON, MessagePack, Protocol Buffers) ## Serializers @@ -395,6 +399,8 @@ For documentation on the latest development code see the [documentation index][d - [Graphite](/plugins/serializers/graphite) - [JSON](/plugins/serializers/json) - [MessagePack](/plugins/serializers/msgpack) +- [Prometheus](/plugins/serializers/prometheus) +- [Prometheus Remote Write](/plugins/serializers/prometheusremotewrite) - [ServiceNow](/plugins/serializers/nowmetric) - [SplunkMetric](/plugins/serializers/splunkmetric) - [Wavefront](/plugins/serializers/wavefront) @@ -429,10 +435,12 @@ For documentation on the latest development code see the [documentation index][d ## Aggregator Plugins * [basicstats](./plugins/aggregators/basicstats) +* [derivative](./plugins/aggregators/derivative) * [final](./plugins/aggregators/final) * [histogram](./plugins/aggregators/histogram) * [merge](./plugins/aggregators/merge) * [minmax](./plugins/aggregators/minmax) +* 
[quantile](./plugins/aggregators/quantile) * [valuecounter](./plugins/aggregators/valuecounter) ## Output Plugins diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md index 2550e7e1044cc..cb04d3e009030 100644 --- a/docs/DATA_FORMATS_INPUT.md +++ b/docs/DATA_FORMATS_INPUT.md @@ -12,13 +12,14 @@ Protocol or in JSON format. - [Grok](/plugins/parsers/grok) - [InfluxDB Line Protocol](/plugins/parsers/influx) - [JSON](/plugins/parsers/json) +- [JSON v2](/plugins/parsers/json_v2) - [Logfmt](/plugins/parsers/logfmt) - [Nagios](/plugins/parsers/nagios) - [Prometheus](/plugins/parsers/prometheus) - [PrometheusRemoteWrite](/plugins/parsers/prometheusremotewrite) - [Value](/plugins/parsers/value), ie: 45 or "booyah" - [Wavefront](/plugins/parsers/wavefront) -- [XML](/plugins/parsers/xml) +- [XPath](/plugins/parsers/xpath) (supports XML, JSON, MessagePack, Protocol Buffers) Any input plugin containing the `data_format` option can use it to select the desired parser: From 8e8074e47b7d00335e9b9aecc67e417351a4e82d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 24 Aug 2021 16:08:15 -0600 Subject: [PATCH 020/176] fix: bump github.com/tinylib/msgp from 1.1.5 to 1.1.6 (#9652) --- go.mod | 2 +- go.sum | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index f09d594111d42..7509cf6ac9b0a 100644 --- a/go.mod +++ b/go.mod @@ -247,7 +247,7 @@ require ( github.com/tidwall/gjson v1.8.0 github.com/tidwall/match v1.0.3 // indirect github.com/tidwall/pretty v1.1.0 // indirect - github.com/tinylib/msgp v1.1.5 + github.com/tinylib/msgp v1.1.6 github.com/tklauser/go-sysconf v0.3.5 // indirect github.com/tklauser/numcpus v0.2.2 // indirect github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330 diff --git a/go.sum b/go.sum index d17f8209df7da..8e2fbee60fd3f 100644 --- a/go.sum +++ b/go.sum @@ -1473,15 +1473,14 @@ github.com/tidwall/pretty v1.0.0/go.mod 
h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhV github.com/tidwall/pretty v1.1.0 h1:K3hMW5epkdAVwibsQEfR/7Zj0Qgt4DxtNumTq/VloO8= github.com/tidwall/pretty v1.1.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/tinylib/msgp v1.1.5 h1:2gXmtWueD2HefZHQe1QOy9HVzmFrLOVvsXwXBQ0ayy0= -github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg= +github.com/tinylib/msgp v1.1.6 h1:i+SbKraHhnrf9M5MYmvQhFnbLhAXSDWF8WWsuyRdocw= +github.com/tinylib/msgp v1.1.6/go.mod h1:75BAfg2hauQhs3qedfdDZmWAPcFMAvJE5b9rGOMufyw= github.com/tklauser/go-sysconf v0.3.5 h1:uu3Xl4nkLzQfXNsWn15rPc/HQCJKObbt1dKJeWp3vU4= github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= github.com/tklauser/numcpus v0.2.2 h1:oyhllyrScuYI6g+h/zUvNXNp1wy7x8qQy3t/piefldA= github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= From 8daba8aa19e2c504ad62bc76cc08f0c47d6f30f7 Mon Sep 17 00:00:00 2001 From: Phil Bracikowski <13472206+philjb@users.noreply.github.com> Date: Wed, 25 Aug 2021 15:43:06 -0700 Subject: [PATCH 021/176] chore(influxv2plugin): Increase accepted retry-after header values. 
(#9619) --- plugins/outputs/influxdb_v2/http.go | 15 ++++---- .../outputs/influxdb_v2/http_internal_test.go | 36 ++++++++++++++++--- 2 files changed, 40 insertions(+), 11 deletions(-) diff --git a/plugins/outputs/influxdb_v2/http.go b/plugins/outputs/influxdb_v2/http.go index e62919cf43b13..e8df4da7d2041 100644 --- a/plugins/outputs/influxdb_v2/http.go +++ b/plugins/outputs/influxdb_v2/http.go @@ -36,8 +36,9 @@ func (e APIError) Error() string { } const ( - defaultRequestTimeout = time.Second * 5 - defaultMaxWait = 60 // seconds + defaultRequestTimeout = time.Second * 5 + defaultMaxWaitSeconds = 60 + defaultMaxWaitRetryAfterSeconds = 10 * 60 ) type HTTPConfig struct { @@ -306,8 +307,9 @@ func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []te // retryDuration takes the longer of the Retry-After header and our own back-off calculation func (c *httpClient) getRetryDuration(headers http.Header) time.Duration { // basic exponential backoff (x^2)/40 (denominator to widen the slope) - // at 40 denominator, it'll take 35 retries to hit the max defaultMaxWait of 30s + // at 40 denominator, it'll take 49 retries to hit the max defaultMaxWait of 60s backoff := math.Pow(float64(c.retryCount), 2) / 40 + backoff = math.Min(backoff, defaultMaxWaitSeconds) // get any value from the header, if available retryAfterHeader := float64(0) @@ -319,11 +321,12 @@ func (c *httpClient) getRetryDuration(headers http.Header) time.Duration { // there was a value but we couldn't parse it? guess minimum 10 sec retryAfterHeader = 10 } + // protect against excessively large retry-after + retryAfterHeader = math.Min(retryAfterHeader, defaultMaxWaitRetryAfterSeconds) } - // take the highest value from both, but not over the max wait. + // take the highest value of backoff and retry-after. 
retry := math.Max(backoff, retryAfterHeader) - retry = math.Min(retry, defaultMaxWait) - return time.Duration(retry) * time.Second + return time.Duration(retry*1000) * time.Millisecond } func (c *httpClient) makeWriteRequest(url string, body io.Reader) (*http.Request, error) { diff --git a/plugins/outputs/influxdb_v2/http_internal_test.go b/plugins/outputs/influxdb_v2/http_internal_test.go index 2ff4990fa8a3b..10e2a4e133eeb 100644 --- a/plugins/outputs/influxdb_v2/http_internal_test.go +++ b/plugins/outputs/influxdb_v2/http_internal_test.go @@ -56,12 +56,12 @@ func TestExponentialBackoffCalculation(t *testing.T) { expected time.Duration }{ {retryCount: 0, expected: 0}, - {retryCount: 1, expected: 0}, - {retryCount: 5, expected: 0}, - {retryCount: 10, expected: 2 * time.Second}, - {retryCount: 30, expected: 22 * time.Second}, + {retryCount: 1, expected: 25 * time.Millisecond}, + {retryCount: 5, expected: 625 * time.Millisecond}, + {retryCount: 10, expected: 2500 * time.Millisecond}, + {retryCount: 30, expected: 22500 * time.Millisecond}, {retryCount: 40, expected: 40 * time.Second}, - {retryCount: 50, expected: 60 * time.Second}, + {retryCount: 50, expected: 60 * time.Second}, // max hit {retryCount: 100, expected: 60 * time.Second}, {retryCount: 1000, expected: 60 * time.Second}, } @@ -72,3 +72,29 @@ func TestExponentialBackoffCalculation(t *testing.T) { }) } } + +func TestExponentialBackoffCalculationWithRetryAfter(t *testing.T) { + c := &httpClient{} + tests := []struct { + retryCount int + retryAfter string + expected time.Duration + }{ + {retryCount: 0, retryAfter: "0", expected: 0}, + {retryCount: 0, retryAfter: "10", expected: 10 * time.Second}, + {retryCount: 0, retryAfter: "60", expected: 60 * time.Second}, + {retryCount: 0, retryAfter: "600", expected: 600 * time.Second}, + {retryCount: 0, retryAfter: "601", expected: 600 * time.Second}, // max hit + {retryCount: 40, retryAfter: "39", expected: 40 * time.Second}, // retryCount wins + {retryCount: 40, 
retryAfter: "41", expected: 41 * time.Second}, // retryAfter wins + {retryCount: 100, retryAfter: "100", expected: 100 * time.Second}, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%d_retries", test.retryCount), func(t *testing.T) { + c.retryCount = test.retryCount + hdr := http.Header{} + hdr.Add("Retry-After", test.retryAfter) + require.EqualValues(t, test.expected, c.getRetryDuration(hdr)) + }) + } +} From 0ce9c2e9f6c1339c53e4f763a4eb2fd03779ab06 Mon Sep 17 00:00:00 2001 From: Daniel Dyla Date: Thu, 26 Aug 2021 12:57:22 -0400 Subject: [PATCH 022/176] fix(dt-output): remove hardcoded int value (#9676) --- plugins/outputs/dynatrace/dynatrace.go | 2 +- plugins/outputs/dynatrace/dynatrace_test.go | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/outputs/dynatrace/dynatrace.go b/plugins/outputs/dynatrace/dynatrace.go index c66bc8da2171e..470eb0e2cd0c6 100644 --- a/plugins/outputs/dynatrace/dynatrace.go +++ b/plugins/outputs/dynatrace/dynatrace.go @@ -287,7 +287,7 @@ func getTypeOption(metricType telegraf.ValueType, field *telegraf.Field) dtMetri case uint64: return dtMetric.WithIntGaugeValue(int64(v)) case int64: - return dtMetric.WithIntGaugeValue(32) + return dtMetric.WithIntGaugeValue(v) case bool: if v { return dtMetric.WithIntGaugeValue(1) diff --git a/plugins/outputs/dynatrace/dynatrace_test.go b/plugins/outputs/dynatrace/dynatrace_test.go index d9076906c1020..65cd3d2a86f0a 100644 --- a/plugins/outputs/dynatrace/dynatrace_test.go +++ b/plugins/outputs/dynatrace/dynatrace_test.go @@ -353,11 +353,11 @@ func TestSendMetricWithDefaultDimensions(t *testing.T) { require.NoError(t, err) bodyString := string(bodyBytes) // use regex because field order isn't guaranteed - require.Equal(t, len(bodyString), 79) + require.Equal(t, len(bodyString), 78) require.Regexp(t, regexp.MustCompile("^mymeasurement.value"), bodyString) require.Regexp(t, regexp.MustCompile("dt.metrics.source=telegraf"), bodyString) require.Regexp(t, 
regexp.MustCompile("dim=value"), bodyString) - require.Regexp(t, regexp.MustCompile("gauge,32 1289430000000$"), bodyString) + require.Regexp(t, regexp.MustCompile("gauge,2 1289430000000$"), bodyString) err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) require.NoError(t, err) })) @@ -378,7 +378,7 @@ func TestSendMetricWithDefaultDimensions(t *testing.T) { m1 := metric.New( "mymeasurement", map[string]string{}, - map[string]interface{}{"value": 32}, + map[string]interface{}{"value": 2}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) From 1a59157b91b35c5b8ddf7dc1d06b31846248d362 Mon Sep 17 00:00:00 2001 From: Alexander Krantz Date: Thu, 26 Aug 2021 13:32:48 -0700 Subject: [PATCH 023/176] fix(mongodb): change command based on server version (#9674) --- plugins/inputs/mongodb/README.md | 2 + plugins/inputs/mongodb/mongodb_server.go | 27 ++++++++++-- plugins/inputs/mongodb/mongodb_server_test.go | 42 +++++++++++++++++++ 3 files changed, 67 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md index ddcb1971f9667..15a474e6bb66a 100644 --- a/plugins/inputs/mongodb/README.md +++ b/plugins/inputs/mongodb/README.md @@ -1,5 +1,7 @@ # MongoDB Input Plugin +All MongoDB server versions from 2.6 and higher are supported. 
+ ### Configuration: ```toml diff --git a/plugins/inputs/mongodb/mongodb_server.go b/plugins/inputs/mongodb/mongodb_server.go index 723b0698b9ac8..79d3d36c6c038 100644 --- a/plugins/inputs/mongodb/mongodb_server.go +++ b/plugins/inputs/mongodb/mongodb_server.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "go.mongodb.org/mongo-driver/bson/primitive" + "strconv" "strings" "time" @@ -126,11 +127,29 @@ func (s *Server) gatherClusterStatus() (*ClusterStatus, error) { }, nil } -func (s *Server) gatherShardConnPoolStats() (*ShardStats, error) { +func poolStatsCommand(version string) (string, error) { + majorPart := string(version[0]) + major, err := strconv.ParseInt(majorPart, 10, 64) + if err != nil { + return "", err + } + + if major == 5 { + return "connPoolStats", nil + } + return "shardConnPoolStats", nil +} + +func (s *Server) gatherShardConnPoolStats(version string) (*ShardStats, error) { + command, err := poolStatsCommand(version) + if err != nil { + return nil, err + } + shardStats := &ShardStats{} - err := s.runCommand("admin", bson.D{ + err = s.runCommand("admin", bson.D{ { - Key: "shardConnPoolStats", + Key: command, Value: 1, }, }, &shardStats) @@ -272,7 +291,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherClusterStatus bool, clusterStatus = status } - shardStats, err := s.gatherShardConnPoolStats() + shardStats, err := s.gatherShardConnPoolStats(serverStatus.Version) if err != nil { s.authLog(fmt.Errorf("unable to gather shard connection pool stats: %s", err.Error())) } diff --git a/plugins/inputs/mongodb/mongodb_server_test.go b/plugins/inputs/mongodb/mongodb_server_test.go index 64fb191639105..c8fd9f7c15284 100644 --- a/plugins/inputs/mongodb/mongodb_server_test.go +++ b/plugins/inputs/mongodb/mongodb_server_test.go @@ -40,3 +40,45 @@ func TestAddDefaultStats(t *testing.T) { assert.True(t, acc.HasInt64Field("mongodb", key)) } } + +func TestPoolStatsVersionCompatibility(t *testing.T) { + tests := []struct { + name string + version string + 
expectedCommand string + err bool + }{ + { + name: "mongodb v3", + version: "3.0.0", + expectedCommand: "shardConnPoolStats", + }, + { + name: "mongodb v4", + version: "4.0.0", + expectedCommand: "shardConnPoolStats", + }, + { + name: "mongodb v5", + version: "5.0.0", + expectedCommand: "connPoolStats", + }, + { + name: "invalid version", + version: "v4", + err: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + command, err := poolStatsCommand(test.version) + require.Equal(t, test.expectedCommand, command) + if test.err { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} From 4dc2967e34f36a8da9d500620711334b9ff3b8d3 Mon Sep 17 00:00:00 2001 From: David B <36965011+DavidBuettner@users.noreply.github.com> Date: Thu, 26 Aug 2021 22:34:52 +0200 Subject: [PATCH 024/176] feat(plugins/inputs/systemd_units): add pattern support (#9665) --- plugins/inputs/systemd_units/README.md | 9 ++++- .../systemd_units/systemd_units_linux.go | 36 ++++++++++++++----- .../systemd_units/systemd_units_linux_test.go | 2 +- 3 files changed, 37 insertions(+), 10 deletions(-) diff --git a/plugins/inputs/systemd_units/README.md b/plugins/inputs/systemd_units/README.md index 7fe09e224c564..f9d47d7df1252 100644 --- a/plugins/inputs/systemd_units/README.md +++ b/plugins/inputs/systemd_units/README.md @@ -1,7 +1,7 @@ # systemd Units Input Plugin The systemd_units plugin gathers systemd unit status on Linux. It relies on -`systemctl list-units --all --plain --type=service` to collect data on service status. +`systemctl list-units [PATTERN] --all --plain --type=service` to collect data on service status. The results are tagged with the unit name and provide enumerated fields for loaded, active and running fields, indicating the unit health. @@ -22,6 +22,13 @@ see `systemctl list-units --all --type help` for possible options. 
## values are "socket", "target", "device", "mount", "automount", "swap", ## "timer", "path", "slice" and "scope ": # unittype = "service" + # + ## Filter for a specific pattern, default is "" (i.e. all), other possible + ## values are valid pattern for systemctl, e.g. "a*" for all units with + ## names starting with "a" + # pattern = "" + ## pattern = "telegraf* influxdb*" + ## pattern = "a*" ``` ### Metrics diff --git a/plugins/inputs/systemd_units/systemd_units_linux.go b/plugins/inputs/systemd_units/systemd_units_linux.go index e94b9432136e4..e41c64752977e 100644 --- a/plugins/inputs/systemd_units/systemd_units_linux.go +++ b/plugins/inputs/systemd_units/systemd_units_linux.go @@ -18,10 +18,11 @@ import ( type SystemdUnits struct { Timeout config.Duration UnitType string `toml:"unittype"` + Pattern string `toml:"pattern"` systemctl systemctl } -type systemctl func(timeout config.Duration, unitType string) (*bytes.Buffer, error) +type systemctl func(timeout config.Duration, unitType string, pattern string) (*bytes.Buffer, error) const measurement = "systemd_units" @@ -115,6 +116,7 @@ var subMap = map[string]int{ var ( defaultTimeout = config.Duration(time.Second) defaultUnitType = "service" + defaultPattern = "" ) // Description returns a short description of the plugin @@ -132,12 +134,19 @@ func (s *SystemdUnits) SampleConfig() string { ## values are "socket", "target", "device", "mount", "automount", "swap", ## "timer", "path", "slice" and "scope ": # unittype = "service" + # + ## Filter for a specific pattern, default is "" (i.e. all), other possible + ## values are valid pattern for systemctl, e.g. 
"a*" for all units with + ## names starting with "a" + # pattern = "" + ## pattern = "telegraf* influxdb*" + ## pattern = "a*" ` } // Gather parses systemctl outputs and adds counters to the Accumulator func (s *SystemdUnits) Gather(acc telegraf.Accumulator) error { - out, err := s.systemctl(s.Timeout, s.UnitType) + out, err := s.systemctl(s.Timeout, s.UnitType, s.Pattern) if err != nil { return err } @@ -192,22 +201,32 @@ func (s *SystemdUnits) Gather(acc telegraf.Accumulator) error { return nil } -func setSystemctl(timeout config.Duration, unitType string) (*bytes.Buffer, error) { +func setSystemctl(timeout config.Duration, unitType string, pattern string) (*bytes.Buffer, error) { // is systemctl available ? systemctlPath, err := exec.LookPath("systemctl") if err != nil { return nil, err } - - cmd := exec.Command(systemctlPath, "list-units", "--all", "--plain", fmt.Sprintf("--type=%s", unitType), "--no-legend") - + // build parameters for systemctl call + params := []string{"list-units"} + // create patterns parameters if provided in config + if pattern != "" { + psplit := strings.SplitN(pattern, " ", -1) + for v := range psplit { + params = append(params, psplit[v]) + } + } + params = append(params, "--all", "--plain") + // add type as configured in config + params = append(params, fmt.Sprintf("--type=%s", unitType)) + params = append(params, "--no-legend") + cmd := exec.Command(systemctlPath, params...) 
var out bytes.Buffer cmd.Stdout = &out err = internal.RunTimeout(cmd, time.Duration(timeout)) if err != nil { - return &out, fmt.Errorf("error running systemctl list-units --all --plain --type=%s --no-legend: %s", unitType, err) + return &out, fmt.Errorf("error running systemctl %s: %s", strings.Join(params, " "), err) } - return &out, nil } @@ -217,6 +236,7 @@ func init() { systemctl: setSystemctl, Timeout: defaultTimeout, UnitType: defaultUnitType, + Pattern: defaultPattern, } }) } diff --git a/plugins/inputs/systemd_units/systemd_units_linux_test.go b/plugins/inputs/systemd_units/systemd_units_linux_test.go index a6cfbd6552771..05070c6ff5e94 100644 --- a/plugins/inputs/systemd_units/systemd_units_linux_test.go +++ b/plugins/inputs/systemd_units/systemd_units_linux_test.go @@ -74,7 +74,7 @@ func TestSystemdUnits(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { systemdUnits := &SystemdUnits{ - systemctl: func(timeout config.Duration, unitType string) (*bytes.Buffer, error) { + systemctl: func(timeout config.Duration, unitType string, pattern string) (*bytes.Buffer, error) { return bytes.NewBufferString(tt.line), nil }, } From 31178e1cf3c7e93e657626d5d35f07a6c5481f29 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 26 Aug 2021 15:36:53 -0500 Subject: [PATCH 025/176] fix: bump cloud.google.com/go/pubsub from 1.2.0 to 1.15.0 (#9655) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Sebastian Spaink --- go.mod | 25 ++++---- go.sum | 177 ++++++++++++++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 174 insertions(+), 28 deletions(-) diff --git a/go.mod b/go.mod index 7509cf6ac9b0a..ff441b60264d1 100644 --- a/go.mod +++ b/go.mod @@ -3,9 +3,9 @@ module github.com/influxdata/telegraf go 1.17 require ( - cloud.google.com/go v0.56.0 - cloud.google.com/go/bigquery v1.4.0 - cloud.google.com/go/pubsub v1.2.0 + 
cloud.google.com/go v0.90.0 + cloud.google.com/go/bigquery v1.8.0 + cloud.google.com/go/pubsub v1.15.0 code.cloudfoundry.org/clock v1.0.0 // indirect collectd.org v0.5.0 github.com/Azure/azure-amqp-common-go/v3 v3.0.0 // indirect @@ -267,7 +267,7 @@ require ( github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 // indirect go.etcd.io/etcd/api/v3 v3.5.0 // indirect go.mongodb.org/mongo-driver v1.5.3 - go.opencensus.io v0.22.3 // indirect + go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/collector/model v0.0.0-20210723184018-3b7d6ce4830c go.starlark.net v0.0.0-20210406145628-7a1108eaa012 go.uber.org/atomic v1.7.0 // indirect @@ -276,20 +276,20 @@ require ( golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect golang.org/x/mod v0.4.2 // indirect golang.org/x/net v0.0.0-20210610132358-84b48f89b13b - golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d + golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 + golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect golang.org/x/text v0.3.6 - golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect - golang.org/x/tools v0.1.2 + golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect + golang.org/x/tools v0.1.5 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect golang.zx2c4.com/wireguard v0.0.20200121 // indirect golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 - google.golang.org/api v0.29.0 - google.golang.org/appengine v1.6.6 // indirect - google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08 - google.golang.org/grpc v1.39.0 + google.golang.org/api v0.54.0 + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20210813162853-db860fec028c + google.golang.org/grpc v1.39.1 google.golang.org/protobuf v1.27.1 gopkg.in/asn1-ber.v1 
v1.0.0-20181015200546-f715ec2f112d // indirect gopkg.in/djherbis/times.v1 v1.2.0 @@ -310,7 +310,6 @@ require ( gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect gotest.tools v2.2.0+incompatible - honnef.co/go/tools v0.0.1-2020.1.4 // indirect k8s.io/api v0.20.4 k8s.io/apimachinery v0.21.1 k8s.io/client-go v0.20.4 diff --git a/go.sum b/go.sum index 8e2fbee60fd3f..d797edf56c569 100644 --- a/go.sum +++ b/go.sum @@ -12,24 +12,42 @@ cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gc cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0 h1:WRz29PgAsVEyPSDHyk+0fpEkwEFyfhHn+JbksT6gIL4= cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0 h1:MjvSkUq8RuAb+2JLDi5VQmmExRJPUQ3JLCWpRB6fmdw= +cloud.google.com/go v0.90.0/go.mod 
h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0 h1:xE3CPsOgttP4ACBePh79zTKALtXwn/Edhcr16R5hMWU= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0 h1:PQcPefKFdaIzjQFbiyOgAqyx8q5djaE7x9Sqe712DPA= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0 h1:Lpy6hKgdcl7a3WGSfJIFmxmcdjSpP6OmBEfcOv1Y680= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.15.0 h1:6KI/wDVYLtNvzIPJ8ObuJcq5bBtAWQ6Suo8osHPvYn4= +cloud.google.com/go/pubsub v1.15.0/go.mod h1:DnEUPGZlp+N9MElp/6uVqCKiknQixvVLcrgrqT62O6A= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0 
h1:UDpwYIwla4jHGzZJaEJYx1tOejbgSoNqsAfHAUYe2r8= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= code.cloudfoundry.org/clock v1.0.0 h1:kFXWQM4bxYvdBw2X8BbBeXwQNgfoWv1vqAk2ZZyBN2o= code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= @@ -317,6 +335,7 @@ github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6/go.mod github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= @@ -509,6 +528,7 @@ github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4s github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= 
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= @@ -704,6 +724,8 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v0.0.0-20170307001533-c9c7427a2a70/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -721,6 +743,7 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy 
v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -755,12 +778,24 @@ github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= 
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -868,6 +903,7 @@ github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= @@ -1564,8 +1600,11 @@ go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= 
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/collector/model v0.0.0-20210723184018-3b7d6ce4830c h1:3s2a2cav7u4W1b0cOYxmlj1y1NcVuDZwgUaAQ6wfImo= go.opentelemetry.io/collector/model v0.0.0-20210723184018-3b7d6ce4830c/go.mod h1:PcHNnM+RUl0uD8VkSn93PO78N7kQYhfqpI/eki57pl4= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= @@ -1649,6 +1688,7 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= @@ -1659,6 +1699,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1685,6 +1727,7 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1703,15 +1746,20 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201201195509-5d6afe98e0b7/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= @@ -1719,17 +1767,28 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net 
v0.0.0-20210331212208-0fccb6fa2b5c/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b h1:k+E048sYJHyVnsr1GDrRZWQ32D2C7lWs9JRc0bel53A= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 
v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a h1:4Kd8OPUx1xgUwrHDaviWZO8MsgoZTZYC3g+8m16RBww= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1812,15 +1871,21 @@ golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200821140526-fda516888d29/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200821140526-fda516888d29/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1833,21 +1898,32 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210611083646-a4fc73990273/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069 h1:siQdpVirKtzPhKl3lZWozZraCFObP8S1v6PRp0bLrtU= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1866,8 +1942,9 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1923,18 +2000,37 @@ 
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200717024301-6ddee64345a6/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools 
v0.0.0-20200822203824-307de81be3f4/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.2 h1:kRBLX7v7Af8W7Gdbbc908OJcdgtK8bOz9Uaj8/F1ACA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1965,17 +2061,33 @@ google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.29.0 h1:BaiDisFir8O4IJxvAabCGGkQ6yCJegNQqSVoYUNAnbk= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0 h1:ECJUVngj71QI6XEm7b1sAf8BljU5inEhMbKPR8Lxhhk= 
+google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1999,15 +2111,42 @@ google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200815001618-f69a88009b70/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto 
v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08 h1:pc16UedxnxXXtGxHCSUhafAoVHQZ0yXl8ZelMH4EETc= google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto 
v0.0.0-20210813162853-db860fec028c h1:iLQakcwWG3k/++1q/46apVb1sUQ3IqIdn9yUE6eh/xA= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -2026,13 +2165,22 @@ google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0 h1:Klz8I9kdtkIN6EpHHUOMLCYhTn/2WAe5a0s1hcBkdTI= google.golang.org/grpc 
v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1 h1:f37vZbBVTiJ6jKG5mWz8ySOBxNqy6ViPgyhSdVnxF3E= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -2135,7 +2283,6 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.18.8/go.mod h1:d/CXqwWv+Z2XEG1LgceeDmHQwpUJhROPx16SlxJgERY= k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= From 2370d39e8905529de585758c8ccdfe11943d7dd4 Mon Sep 17 00:00:00 2001 From: reimda Date: Mon, 30 Aug 2021 09:13:08 -0600 Subject: [PATCH 026/176] fix: output timestamp with fractional seconds (#9625) --- plugins/outputs/graylog/graylog.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/outputs/graylog/graylog.go b/plugins/outputs/graylog/graylog.go index 05feafe9effc1..cf5dc6dc5ac3b 100644 --- a/plugins/outputs/graylog/graylog.go +++ b/plugins/outputs/graylog/graylog.go @@ -214,7 +214,7 @@ func (g *Graylog) serialize(metric telegraf.Metric) ([]string, error) { m 
:= make(map[string]interface{}) m["version"] = "1.1" - m["timestamp"] = metric.Time().UnixNano() / 1000000000 + m["timestamp"] = float64(metric.Time().UnixNano()) / 1_000_000_000 m["short_message"] = "telegraf" m["name"] = metric.Name() From 435c2a6e3399c08fcecf26b0e294ea7051d1312e Mon Sep 17 00:00:00 2001 From: John Seekins Date: Tue, 31 Aug 2021 16:04:32 -0600 Subject: [PATCH 027/176] feat: add inputs.mdstat to gather from /proc/mdstat collection (#9101) --- README.md | 1 + plugins/inputs/all/all.go | 1 + plugins/inputs/mdstat/README.md | 49 ++++ plugins/inputs/mdstat/mdstat.go | 313 +++++++++++++++++++++++ plugins/inputs/mdstat/mdstat_notlinux.go | 3 + plugins/inputs/mdstat/mdstat_test.go | 148 +++++++++++ 6 files changed, 515 insertions(+) create mode 100644 plugins/inputs/mdstat/README.md create mode 100644 plugins/inputs/mdstat/mdstat.go create mode 100644 plugins/inputs/mdstat/mdstat_notlinux.go create mode 100644 plugins/inputs/mdstat/mdstat_test.go diff --git a/README.md b/README.md index 2097ea5c37e2b..c4a89b751c5d2 100644 --- a/README.md +++ b/README.md @@ -265,6 +265,7 @@ For documentation on the latest development code see the [documentation index][d * [mailchimp](./plugins/inputs/mailchimp) * [marklogic](./plugins/inputs/marklogic) * [mcrouter](./plugins/inputs/mcrouter) +* [mdstat](./plugins/inputs/mdstat) * [memcached](./plugins/inputs/memcached) * [mem](./plugins/inputs/mem) * [mesos](./plugins/inputs/mesos) diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 95cfcf6626444..350a8cca08cdb 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -101,6 +101,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/mailchimp" _ "github.com/influxdata/telegraf/plugins/inputs/marklogic" _ "github.com/influxdata/telegraf/plugins/inputs/mcrouter" + _ "github.com/influxdata/telegraf/plugins/inputs/mdstat" _ "github.com/influxdata/telegraf/plugins/inputs/mem" _ 
"github.com/influxdata/telegraf/plugins/inputs/memcached" _ "github.com/influxdata/telegraf/plugins/inputs/mesos" diff --git a/plugins/inputs/mdstat/README.md b/plugins/inputs/mdstat/README.md new file mode 100644 index 0000000000000..6180833b69ade --- /dev/null +++ b/plugins/inputs/mdstat/README.md @@ -0,0 +1,49 @@ +# mdstat Input Plugin + +The mdstat plugin gathers statistics about any Linux MD RAID arrays configured on the host +by reading /proc/mdstat. For a full list of available fields see the +/proc/mdstat section of the [proc man page](http://man7.org/linux/man-pages/man5/proc.5.html). +For a better idea of what each field represents, see the +[mdstat man page](https://raid.wiki.kernel.org/index.php/Mdstat). + +Stat collection based on Prometheus' mdstat collection library at https://github.com/prometheus/procfs/blob/master/mdstat.go + + +### Configuration: + +```toml +# Get kernel statistics from /proc/mdstat +[[inputs.mdstat]] + ## Sets file path + ## If not specified, then default is /proc/mdstat + # file_name = "/proc/mdstat" +``` + +### Measurements & Fields: + +- mdstat + - BlocksSynced (if the array is rebuilding/checking, this is the count of blocks that have been scanned) + - BlocksSyncedFinishTime (the expected finish time of the rebuild scan, listed in minutes remaining) + - BlocksSyncedPct (the percentage of the rebuild scan left) + - BlocksSyncedSpeed (the current speed the rebuild is running at, listed in K/sec) + - BlocksTotal (the total count of blocks in the array) + - DisksActive (the number of disks that are currently considered healthy in the array) + - DisksFailed (the current count of failed disks in the array) + - DisksSpare (the current count of "spare" disks in the array) + - DisksTotal (total count of disks in the array) + +### Tags: + +- mdstat + - ActivityState (`active` or `inactive`) + - Devices (comma separated list of devices that make up the array) + - Name (name of the array) + +### Example Output: + +``` +$ telegraf 
--config ~/ws/telegraf.conf --input-filter mdstat --test +* Plugin: mdstat, Collection 1 +> mdstat,ActivityState=active,Devices=sdm1\,sdn1,Name=md1 BlocksSynced=231299072i,BlocksSyncedFinishTime=0,BlocksSyncedPct=0,BlocksSyncedSpeed=0,BlocksTotal=231299072i,DisksActive=2i,DisksFailed=0i,DisksSpare=0i,DisksTotal=2i,DisksDown=0i 1617814276000000000 +> mdstat,ActivityState=active,Devices=sdm5\,sdn5,Name=md2 BlocksSynced=2996224i,BlocksSyncedFinishTime=0,BlocksSyncedPct=0,BlocksSyncedSpeed=0,BlocksTotal=2996224i,DisksActive=2i,DisksFailed=0i,DisksSpare=0i,DisksTotal=2i,DisksDown=0i 1617814276000000000 +``` diff --git a/plugins/inputs/mdstat/mdstat.go b/plugins/inputs/mdstat/mdstat.go new file mode 100644 index 0000000000000..0f18379c4c092 --- /dev/null +++ b/plugins/inputs/mdstat/mdstat.go @@ -0,0 +1,313 @@ +// +build linux + +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code has been changed since initial import. 
+ +package mdstat + +import ( + "fmt" + "io/ioutil" + "os" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +const ( + defaultHostProc = "/proc" + envProc = "HOST_PROC" +) + +var ( + statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`) + recoveryLineBlocksRE = regexp.MustCompile(`\((\d+)/\d+\)`) + recoveryLinePctRE = regexp.MustCompile(`= (.+)%`) + recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`) + recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`) + componentDeviceRE = regexp.MustCompile(`(.*)\[\d+\]`) +) + +type statusLine struct { + active int64 + total int64 + size int64 + down int64 +} + +type recoveryLine struct { + syncedBlocks int64 + pct float64 + finish float64 + speed float64 +} + +type MdstatConf struct { + FileName string `toml:"file_name"` +} + +func (k *MdstatConf) Description() string { + return "Get md array statistics from /proc/mdstat" +} + +var mdSampleConfig = ` + ## Sets file path + ## If not specified, then default is /proc/mdstat + # file_name = "/proc/mdstat" +` + +func (k *MdstatConf) SampleConfig() string { + return mdSampleConfig +} + +func evalStatusLine(deviceLine, statusLineStr string) (statusLine, error) { + sizeFields := strings.Fields(statusLineStr) + if len(sizeFields) < 1 { + return statusLine{active: 0, total: 0, down: 0, size: 0}, + fmt.Errorf("statusLine empty? %q", statusLineStr) + } + sizeStr := sizeFields[0] + size, err := strconv.ParseInt(sizeStr, 10, 64) + if err != nil { + return statusLine{active: 0, total: 0, down: 0, size: 0}, + fmt.Errorf("unexpected statusLine %q: %w", statusLineStr, err) + } + + if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") { + // In the device deviceLine, only disks have a number associated with them in []. 
+ total := int64(strings.Count(deviceLine, "[")) + return statusLine{active: total, total: total, down: 0, size: size}, nil + } + + if strings.Contains(deviceLine, "inactive") { + return statusLine{active: 0, total: 0, down: 0, size: size}, nil + } + + matches := statusLineRE.FindStringSubmatch(statusLineStr) + if len(matches) != 5 { + return statusLine{active: 0, total: 0, down: 0, size: size}, + fmt.Errorf("couldn't find all the substring matches: %s", statusLineStr) + } + total, err := strconv.ParseInt(matches[2], 10, 64) + if err != nil { + return statusLine{active: 0, total: 0, down: 0, size: size}, + fmt.Errorf("unexpected statusLine %q: %w", statusLineStr, err) + } + active, err := strconv.ParseInt(matches[3], 10, 64) + if err != nil { + return statusLine{active: 0, total: total, down: 0, size: size}, + fmt.Errorf("unexpected statusLine %q: %w", statusLineStr, err) + } + down := int64(strings.Count(matches[4], "_")) + + return statusLine{active: active, total: total, size: size, down: down}, nil +} + +func evalRecoveryLine(recoveryLineStr string) (recoveryLine, error) { + // Get count of completed vs. 
total blocks + matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLineStr) + if len(matches) != 2 { + return recoveryLine{syncedBlocks: 0, pct: 0, finish: 0, speed: 0}, + fmt.Errorf("unexpected recoveryLine matching syncedBlocks: %s", recoveryLineStr) + } + syncedBlocks, err := strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return recoveryLine{syncedBlocks: 0, pct: 0, finish: 0, speed: 0}, + fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLineStr, err) + } + + // Get percentage complete + matches = recoveryLinePctRE.FindStringSubmatch(recoveryLineStr) + if len(matches) != 2 { + return recoveryLine{syncedBlocks: syncedBlocks, pct: 0, finish: 0, speed: 0}, + fmt.Errorf("unexpected recoveryLine matching percentage: %s", recoveryLineStr) + } + pct, err := strconv.ParseFloat(matches[1], 64) + if err != nil { + return recoveryLine{syncedBlocks: syncedBlocks, pct: 0, finish: 0, speed: 0}, + fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLineStr, err) + } + + // Get time expected left to complete + matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLineStr) + if len(matches) != 2 { + return recoveryLine{syncedBlocks: syncedBlocks, pct: pct, finish: 0, speed: 0}, + fmt.Errorf("unexpected recoveryLine matching est. 
finish time: %s", recoveryLineStr) + } + finish, err := strconv.ParseFloat(matches[1], 64) + if err != nil { + return recoveryLine{syncedBlocks: syncedBlocks, pct: pct, finish: 0, speed: 0}, + fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLineStr, err) + } + + // Get recovery speed + matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLineStr) + if len(matches) != 2 { + return recoveryLine{syncedBlocks: syncedBlocks, pct: pct, finish: finish, speed: 0}, + fmt.Errorf("unexpected recoveryLine matching speed: %s", recoveryLineStr) + } + speed, err := strconv.ParseFloat(matches[1], 64) + if err != nil { + return recoveryLine{syncedBlocks: syncedBlocks, pct: pct, finish: finish, speed: 0}, + fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLineStr, err) + } + return recoveryLine{syncedBlocks: syncedBlocks, pct: pct, finish: finish, speed: speed}, nil +} + +func evalComponentDevices(deviceFields []string) string { + mdComponentDevices := make([]string, 0) + if len(deviceFields) > 3 { + for _, field := range deviceFields[4:] { + match := componentDeviceRE.FindStringSubmatch(field) + if match == nil { + continue + } + mdComponentDevices = append(mdComponentDevices, match[1]) + } + } + + // Ensure no churn on tag ordering change + sort.Strings(mdComponentDevices) + return strings.Join(mdComponentDevices, ",") +} + +func (k *MdstatConf) Gather(acc telegraf.Accumulator) error { + data, err := k.getProcMdstat() + if err != nil { + return err + } + lines := strings.Split(string(data), "\n") + // empty file should return nothing + if len(lines) < 3 { + return nil + } + for i, line := range lines { + if strings.TrimSpace(line) == "" || line[0] == ' ' || strings.HasPrefix(line, "Personalities") || strings.HasPrefix(line, "unused") { + continue + } + deviceFields := strings.Fields(line) + if len(deviceFields) < 3 || len(lines) <= i+3 { + return fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line) + } + mdName := 
deviceFields[0] // mdx + state := deviceFields[2] // active or inactive + + /* + Failed disks have the suffix (F) & Spare disks have the suffix (S). + Failed disks may also not be marked separately... + */ + fail := int64(strings.Count(line, "(F)")) + spare := int64(strings.Count(line, "(S)")) + + sts, err := evalStatusLine(lines[i], lines[i+1]) + if err != nil { + return fmt.Errorf("error parsing md device lines: %w", err) + } + + syncLineIdx := i + 2 + if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line + syncLineIdx++ + } + + var rcvry recoveryLine + // If device is syncing at the moment, get the number of currently + // synced bytes, otherwise that number equals the size of the device. + rcvry.syncedBlocks = sts.size + recovering := strings.Contains(lines[syncLineIdx], "recovery") + resyncing := strings.Contains(lines[syncLineIdx], "resync") + checking := strings.Contains(lines[syncLineIdx], "check") + + // Append recovery and resyncing state info. + if recovering || resyncing || checking { + if recovering { + state = "recovering" + } else if checking { + state = "checking" + } else { + state = "resyncing" + } + + // Handle case when resync=PENDING or resync=DELAYED. 
+ if strings.Contains(lines[syncLineIdx], "PENDING") || strings.Contains(lines[syncLineIdx], "DELAYED") { + rcvry.syncedBlocks = 0 + } else { + var err error + rcvry, err = evalRecoveryLine(lines[syncLineIdx]) + if err != nil { + return fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err) + } + } + } + fields := map[string]interface{}{ + "DisksActive": sts.active, + "DisksFailed": fail, + "DisksSpare": spare, + "DisksTotal": sts.total, + "DisksDown": sts.down, + "BlocksTotal": sts.size, + "BlocksSynced": rcvry.syncedBlocks, + "BlocksSyncedPct": rcvry.pct, + "BlocksSyncedFinishTime": rcvry.finish, + "BlocksSyncedSpeed": rcvry.speed, + } + tags := map[string]string{ + "Name": mdName, + "ActivityState": state, + "Devices": evalComponentDevices(deviceFields), + } + acc.AddFields("mdstat", fields, tags) + } + + return nil +} + +func (k *MdstatConf) getProcMdstat() ([]byte, error) { + var mdStatFile string + if k.FileName == "" { + mdStatFile = proc(envProc, defaultHostProc) + "/mdstat" + } else { + mdStatFile = k.FileName + } + if _, err := os.Stat(mdStatFile); os.IsNotExist(err) { + return nil, fmt.Errorf("mdstat: %s does not exist", mdStatFile) + } else if err != nil { + return nil, err + } + + data, err := ioutil.ReadFile(mdStatFile) + if err != nil { + return nil, err + } + + return data, nil +} + +func init() { + inputs.Add("mdstat", func() telegraf.Input { return &MdstatConf{} }) +} + +// proc can be used to read file paths from env +func proc(env, path string) string { + // try to read full file path + if p := os.Getenv(env); p != "" { + return p + } + // return default path + return path +} diff --git a/plugins/inputs/mdstat/mdstat_notlinux.go b/plugins/inputs/mdstat/mdstat_notlinux.go new file mode 100644 index 0000000000000..f0fe87e66ba91 --- /dev/null +++ b/plugins/inputs/mdstat/mdstat_notlinux.go @@ -0,0 +1,3 @@ +// +build !linux + +package mdstat diff --git a/plugins/inputs/mdstat/mdstat_test.go b/plugins/inputs/mdstat/mdstat_test.go new 
file mode 100644 index 0000000000000..030ac2cb55f6f --- /dev/null +++ b/plugins/inputs/mdstat/mdstat_test.go @@ -0,0 +1,148 @@ +// +build linux + +package mdstat + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" +) + +func TestFullMdstatProcFile(t *testing.T) { + filename := makeFakeMDStatFile([]byte(mdStatFileFull)) + defer os.Remove(filename) + k := MdstatConf{ + FileName: filename, + } + acc := testutil.Accumulator{} + err := k.Gather(&acc) + assert.NoError(t, err) + + fields := map[string]interface{}{ + "BlocksSynced": int64(10620027200), + "BlocksSyncedFinishTime": float64(101.6), + "BlocksSyncedPct": float64(94.3), + "BlocksSyncedSpeed": float64(103517), + "BlocksTotal": int64(11251451904), + "DisksActive": int64(12), + "DisksFailed": int64(0), + "DisksSpare": int64(0), + "DisksTotal": int64(12), + "DisksDown": int64(0), + } + acc.AssertContainsFields(t, "mdstat", fields) +} + +func TestFailedDiskMdStatProcFile1(t *testing.T) { + filename := makeFakeMDStatFile([]byte(mdStatFileFailedDisk)) + defer os.Remove(filename) + + k := MdstatConf{ + FileName: filename, + } + + acc := testutil.Accumulator{} + err := k.Gather(&acc) + assert.NoError(t, err) + + fields := map[string]interface{}{ + "BlocksSynced": int64(5860144128), + "BlocksSyncedFinishTime": float64(0), + "BlocksSyncedPct": float64(0), + "BlocksSyncedSpeed": float64(0), + "BlocksTotal": int64(5860144128), + "DisksActive": int64(3), + "DisksFailed": int64(0), + "DisksSpare": int64(0), + "DisksTotal": int64(4), + "DisksDown": int64(1), + } + acc.AssertContainsFields(t, "mdstat", fields) +} + +func TestEmptyMdStatProcFile1(t *testing.T) { + filename := makeFakeMDStatFile([]byte(mdStatFileEmpty)) + defer os.Remove(filename) + + k := MdstatConf{ + FileName: filename, + } + + acc := testutil.Accumulator{} + err := k.Gather(&acc) + assert.NoError(t, err) +} + +func TestInvalidMdStatProcFile1(t *testing.T) { + filename := 
makeFakeMDStatFile([]byte(mdStatFileInvalid)) + defer os.Remove(filename) + + k := MdstatConf{ + FileName: filename, + } + + acc := testutil.Accumulator{} + err := k.Gather(&acc) + assert.Error(t, err) +} + +const mdStatFileFull = ` +Personalities : [raid1] [raid10] [linear] [multipath] [raid0] [raid6] [raid5] [raid4] +md2 : active raid10 sde[2] sdl[9] sdf[3] sdk[8] sdh[5] sdd[1] sdg[4] sdn[11] sdm[10] sdj[7] sdc[0] sdi[6] + 11251451904 blocks super 1.2 512K chunks 2 near-copies [12/12] [UUUUUUUUUUUU] + [==================>..] check = 94.3% (10620027200/11251451904) finish=101.6min speed=103517K/sec + bitmap: 35/84 pages [140KB], 65536KB chunk + +md1 : active raid1 sdb2[2] sda2[0] + 5909504 blocks super 1.2 [2/2] [UU] + +md0 : active raid1 sdb1[2] sda1[0] + 244005888 blocks super 1.2 [2/2] [UU] + bitmap: 1/2 pages [4KB], 65536KB chunk + +unused devices: +` + +const mdStatFileFailedDisk = ` +Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] +md0 : active raid5 sdd1[3] sdb1[1] sda1[0] + 5860144128 blocks super 1.2 level 5, 64k chunk, algorithm 2 [4/3] [UUU_] + bitmap: 8/15 pages [32KB], 65536KB chunk + +unused devices: +` + +const mdStatFileEmpty = ` +Personalities : +unused devices: +` + +const mdStatFileInvalid = ` +Personalities : + +mdf1: testman actve + +md0 : active raid1 sdb1[2] sda1[0] + 244005888 blocks super 1.2 [2/2] [UU] + bitmap: 1/2 pages [4KB], 65536KB chunk + +unused devices: +` + +func makeFakeMDStatFile(content []byte) (filename string) { + fileobj, err := ioutil.TempFile("", "mdstat") + if err != nil { + panic(err) + } + + if _, err = fileobj.Write(content); err != nil { + panic(err) + } + if err := fileobj.Close(); err != nil { + panic(err) + } + return fileobj.Name() +} From b8ff3e9c56686f2239013e272f395657909ae94c Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Wed, 1 Sep 2021 09:35:10 -0700 Subject: [PATCH 028/176] fix: run go fmt on inputs.mdstat with go1.17 
(#9702) --- plugins/inputs/mdstat/mdstat.go | 1 + plugins/inputs/mdstat/mdstat_notlinux.go | 1 + plugins/inputs/mdstat/mdstat_test.go | 1 + 3 files changed, 3 insertions(+) diff --git a/plugins/inputs/mdstat/mdstat.go b/plugins/inputs/mdstat/mdstat.go index 0f18379c4c092..81e3f36e7c767 100644 --- a/plugins/inputs/mdstat/mdstat.go +++ b/plugins/inputs/mdstat/mdstat.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux // Copyright 2018 The Prometheus Authors diff --git a/plugins/inputs/mdstat/mdstat_notlinux.go b/plugins/inputs/mdstat/mdstat_notlinux.go index f0fe87e66ba91..409ae776102b0 100644 --- a/plugins/inputs/mdstat/mdstat_notlinux.go +++ b/plugins/inputs/mdstat/mdstat_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package mdstat diff --git a/plugins/inputs/mdstat/mdstat_test.go b/plugins/inputs/mdstat/mdstat_test.go index 030ac2cb55f6f..fe6041abec353 100644 --- a/plugins/inputs/mdstat/mdstat_test.go +++ b/plugins/inputs/mdstat/mdstat_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package mdstat From 167b6e0075b5da04fd3c33a86e68c95b3727e485 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Wed, 1 Sep 2021 22:21:53 -0700 Subject: [PATCH 029/176] fix: race condition in cookie test (#9659) --- plugins/common/cookie/cookie.go | 44 ++++++--- plugins/common/cookie/cookie_test.go | 128 +++++++++++---------------- 2 files changed, 80 insertions(+), 92 deletions(-) diff --git a/plugins/common/cookie/cookie.go b/plugins/common/cookie/cookie.go index 10213f78d9b37..e452a50a4b0a9 100644 --- a/plugins/common/cookie/cookie.go +++ b/plugins/common/cookie/cookie.go @@ -1,12 +1,14 @@ package cookie import ( + "context" "fmt" "io" "io/ioutil" "net/http" "net/http/cookiejar" "strings" + "sync" "time" clockutil "github.com/benbjohnson/clock" @@ -26,9 +28,25 @@ type CookieAuthConfig struct { Renewal config.Duration `toml:"cookie_auth_renewal"` client *http.Client + wg sync.WaitGroup } func (c 
*CookieAuthConfig) Start(client *http.Client, log telegraf.Logger, clock clockutil.Clock) (err error) { + if err = c.initializeClient(client); err != nil { + return err + } + + // continual auth renewal if set + if c.Renewal > 0 { + ticker := clock.Ticker(time.Duration(c.Renewal)) + // this context is used in the tests only, it is to cancel the goroutine + go c.authRenewal(context.Background(), ticker, log) + } + + return nil +} + +func (c *CookieAuthConfig) initializeClient(client *http.Client) (err error) { c.client = client if c.Method == "" { @@ -40,23 +58,21 @@ func (c *CookieAuthConfig) Start(client *http.Client, log telegraf.Logger, clock return err } - if err = c.auth(); err != nil { - return err - } + return c.auth() +} - // continual auth renewal if set - if c.Renewal > 0 { - ticker := clock.Ticker(time.Duration(c.Renewal)) - go func() { - for range ticker.C { - if err := c.auth(); err != nil && log != nil { - log.Errorf("renewal failed for %q: %v", c.URL, err) - } +func (c *CookieAuthConfig) authRenewal(ctx context.Context, ticker *clockutil.Ticker, log telegraf.Logger) { + for { + select { + case <-ctx.Done(): + c.wg.Done() + return + case <-ticker.C: + if err := c.auth(); err != nil && log != nil { + log.Errorf("renewal failed for %q: %v", c.URL, err) } - }() + } } - - return nil } func (c *CookieAuthConfig) auth() error { diff --git a/plugins/common/cookie/cookie_test.go b/plugins/common/cookie/cookie_test.go index 036ca2b5bb5a7..99269c27cd339 100644 --- a/plugins/common/cookie/cookie_test.go +++ b/plugins/common/cookie/cookie_test.go @@ -1,6 +1,7 @@ -package cookie_test +package cookie import ( + "context" "fmt" "io/ioutil" "net/http" @@ -12,7 +13,6 @@ import ( clockutil "github.com/benbjohnson/clock" "github.com/google/go-cmp/cmp" "github.com/influxdata/telegraf/config" - "github.com/influxdata/telegraf/plugins/common/cookie" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -118,44 +118,25 @@ func 
TestAuthConfig_Start(t *testing.T) { endpoint string } tests := []struct { - name string - fields fields - args args - wantErr error - assert func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer, mock *clockutil.Mock) + name string + fields fields + args args + wantErr error + firstAuthCount int32 + lastAuthCount int32 + firstHTTPResponse int + lastHTTPResponse int }{ - { - name: "zero renewal does not renew", - args: args{ - renewal: 0, - endpoint: authEndpointNoCreds, - }, - assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer, mock *clockutil.Mock) { - // should have Cookie Authed once - srv.checkAuthCount(t, 1) - srv.checkResp(t, http.StatusOK) - mock.Add(renewalCheck) - srv.checkAuthCount(t, 1) - srv.checkResp(t, http.StatusOK) - }, - }, { name: "success no creds, no body, default method", args: args{ renewal: renewal, endpoint: authEndpointNoCreds, }, - assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer, mock *clockutil.Mock) { - // should have Cookie Authed once - srv.checkAuthCount(t, 1) - // default method set - require.Equal(t, http.MethodPost, c.Method) - srv.checkResp(t, http.StatusOK) - mock.Add(renewalCheck) - // should have Cookie Authed at least twice more - srv.checkAuthCount(t, 3) - srv.checkResp(t, http.StatusOK) - }, + firstAuthCount: 1, + lastAuthCount: 3, + firstHTTPResponse: http.StatusOK, + lastHTTPResponse: http.StatusOK, }, { name: "success with creds, no body", @@ -168,15 +149,10 @@ func TestAuthConfig_Start(t *testing.T) { renewal: renewal, endpoint: authEndpointWithBasicAuth, }, - assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer, mock *clockutil.Mock) { - // should have Cookie Authed once - srv.checkAuthCount(t, 1) - srv.checkResp(t, http.StatusOK) - mock.Add(renewalCheck) - // should have Cookie Authed at least twice more - srv.checkAuthCount(t, 3) - srv.checkResp(t, http.StatusOK) - }, + firstAuthCount: 1, + lastAuthCount: 3, + firstHTTPResponse: http.StatusOK, + 
lastHTTPResponse: http.StatusOK, }, { name: "failure with bad creds", @@ -189,16 +165,11 @@ func TestAuthConfig_Start(t *testing.T) { renewal: renewal, endpoint: authEndpointWithBasicAuth, }, - wantErr: fmt.Errorf("cookie auth renewal received status code: 401 (Unauthorized)"), - assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer, mock *clockutil.Mock) { - // should have never Cookie Authed - srv.checkAuthCount(t, 0) - srv.checkResp(t, http.StatusForbidden) - mock.Add(renewalCheck) - // should have still never Cookie Authed - srv.checkAuthCount(t, 0) - srv.checkResp(t, http.StatusForbidden) - }, + wantErr: fmt.Errorf("cookie auth renewal received status code: 401 (Unauthorized)"), + firstAuthCount: 0, + lastAuthCount: 0, + firstHTTPResponse: http.StatusForbidden, + lastHTTPResponse: http.StatusForbidden, }, { name: "success with no creds, with good body", @@ -210,15 +181,10 @@ func TestAuthConfig_Start(t *testing.T) { renewal: renewal, endpoint: authEndpointWithBody, }, - assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer, mock *clockutil.Mock) { - // should have Cookie Authed once - srv.checkAuthCount(t, 1) - srv.checkResp(t, http.StatusOK) - mock.Add(renewalCheck) - // should have Cookie Authed at least twice more - srv.checkAuthCount(t, 3) - srv.checkResp(t, http.StatusOK) - }, + firstAuthCount: 1, + lastAuthCount: 3, + firstHTTPResponse: http.StatusOK, + lastHTTPResponse: http.StatusOK, }, { name: "failure with bad body", @@ -230,23 +196,18 @@ func TestAuthConfig_Start(t *testing.T) { renewal: renewal, endpoint: authEndpointWithBody, }, - wantErr: fmt.Errorf("cookie auth renewal received status code: 401 (Unauthorized)"), - assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer, mock *clockutil.Mock) { - // should have never Cookie Authed - srv.checkAuthCount(t, 0) - srv.checkResp(t, http.StatusForbidden) - mock.Add(renewalCheck) - // should have still never Cookie Authed - srv.checkAuthCount(t, 0) - 
srv.checkResp(t, http.StatusForbidden) - }, + wantErr: fmt.Errorf("cookie auth renewal received status code: 401 (Unauthorized)"), + firstAuthCount: 0, + lastAuthCount: 0, + firstHTTPResponse: http.StatusForbidden, + lastHTTPResponse: http.StatusForbidden, }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { srv := newFakeServer(t) - c := &cookie.CookieAuthConfig{ + c := &CookieAuthConfig{ URL: srv.URL + tt.args.endpoint, Method: tt.fields.Method, Username: tt.fields.Username, @@ -254,17 +215,28 @@ func TestAuthConfig_Start(t *testing.T) { Body: tt.fields.Body, Renewal: config.Duration(tt.args.renewal), } - - mock := clockutil.NewMock() - if err := c.Start(srv.Client(), testutil.Logger{Name: "cookie_auth"}, mock); tt.wantErr != nil { + if err := c.initializeClient(srv.Client()); tt.wantErr != nil { require.EqualError(t, err, tt.wantErr.Error()) } else { require.NoError(t, err) } + mock := clockutil.NewMock() + ticker := mock.Ticker(time.Duration(c.Renewal)) + defer ticker.Stop() + + c.wg.Add(1) + ctx, cancel := context.WithCancel(context.Background()) + go c.authRenewal(ctx, ticker, testutil.Logger{Name: "cookie_auth"}) + + srv.checkAuthCount(t, tt.firstAuthCount) + srv.checkResp(t, tt.firstHTTPResponse) + mock.Add(renewalCheck) + // Ensure that the auth renewal goroutine has completed + cancel() + c.wg.Wait() + srv.checkAuthCount(t, tt.lastAuthCount) + srv.checkResp(t, tt.lastHTTPResponse) - if tt.assert != nil { - tt.assert(t, c, srv, mock) - } srv.Close() }) } From 514a942a6ceb0a6972582a62e157aed6849eb05d Mon Sep 17 00:00:00 2001 From: Jake McCrary Date: Thu, 2 Sep 2021 09:56:45 -0500 Subject: [PATCH 030/176] Make prometheus serializer update timestamps and expiration time as new data arrives (#9139) --- plugins/serializers/prometheus/README.md | 6 +- plugins/serializers/prometheus/collection.go | 6 + .../serializers/prometheus/collection_test.go | 417 ++++++++++++++++++ 3 files changed, 428 insertions(+), 1 deletion(-) diff --git 
a/plugins/serializers/prometheus/README.md b/plugins/serializers/prometheus/README.md index 19c869ffbccb3..446def0b46d77 100644 --- a/plugins/serializers/prometheus/README.md +++ b/plugins/serializers/prometheus/README.md @@ -8,7 +8,11 @@ use the `metric_version = 2` option in order to properly round trip metrics. not be correct if the metric spans multiple batches. This issue can be somewhat, but not fully, mitigated by using outputs that support writing in "batch format". When using histogram and summary types, it is recommended to -use only the `prometheus_client` output. +use only the `prometheus_client` output. Histogram and Summary types +also update their expiration time based on the most recently received data. +If incoming metrics stop updating specific buckets or quantiles but continue +reporting others every bucket/quantile will continue to exist. + ### Configuration diff --git a/plugins/serializers/prometheus/collection.go b/plugins/serializers/prometheus/collection.go index ed442e23c85fd..caa8a7334d91d 100644 --- a/plugins/serializers/prometheus/collection.go +++ b/plugins/serializers/prometheus/collection.go @@ -241,6 +241,9 @@ func (c *Collection) Add(metric telegraf.Metric, now time.Time) { AddTime: now, Histogram: &Histogram{}, } + } else { + m.Time = metric.Time() + m.AddTime = now } switch { case strings.HasSuffix(field.Key, "_bucket"): @@ -289,6 +292,9 @@ func (c *Collection) Add(metric telegraf.Metric, now time.Time) { AddTime: now, Summary: &Summary{}, } + } else { + m.Time = metric.Time() + m.AddTime = now } switch { case strings.HasSuffix(field.Key, "_sum"): diff --git a/plugins/serializers/prometheus/collection_test.go b/plugins/serializers/prometheus/collection_test.go index d2c5f5d098162..deb400ba2d899 100644 --- a/plugins/serializers/prometheus/collection_test.go +++ b/plugins/serializers/prometheus/collection_test.go @@ -302,6 +302,117 @@ func TestCollectionExpire(t *testing.T) { }, }, }, + { + name: "entire histogram expires", + now: 
time.Unix(20, 0), + age: 10 * time.Second, + input: []Input{ + { + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "http_request_duration_seconds_sum": 10.0, + "http_request_duration_seconds_count": 2, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.05"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 1.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "+Inf"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 1.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(0, 0), + }, + }, + expected: []*dto.MetricFamily{}, + }, + { + name: "histogram does not expire because of addtime from bucket", + now: time.Unix(20, 0), + age: 10 * time.Second, + input: []Input{ + { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "+Inf"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 1.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "http_request_duration_seconds_sum": 10.0, + "http_request_duration_seconds_count": 2, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.05"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 1.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(15, 0), // More recent addtime causes entire metric to stay valid + }, + }, + expected: []*dto.MetricFamily{ + { + Name: proto.String("http_request_duration_seconds"), + Help: proto.String(helpString), + Type: dto.MetricType_HISTOGRAM.Enum(), + Metric: 
[]*dto.Metric{ + { + Label: []*dto.LabelPair{}, + Histogram: &dto.Histogram{ + SampleCount: proto.Uint64(2), + SampleSum: proto.Float64(10.0), + Bucket: []*dto.Bucket{ + { + UpperBound: proto.Float64(math.Inf(1)), + CumulativeCount: proto.Uint64(1), + }, + { + UpperBound: proto.Float64(0.05), + CumulativeCount: proto.Uint64(1), + }, + }, + }, + }, + }, + }, + }, + }, { name: "summary quantile updates", now: time.Unix(0, 0), @@ -379,6 +490,106 @@ func TestCollectionExpire(t *testing.T) { }, }, }, + { + name: "Entire summary expires", + now: time.Unix(20, 0), + age: 10 * time.Second, + input: []Input{ + { + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "rpc_duration_seconds_sum": 1.0, + "rpc_duration_seconds_count": 1, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.01"}, + map[string]interface{}{ + "rpc_duration_seconds": 1.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + addtime: time.Unix(0, 0), + }, + }, + expected: []*dto.MetricFamily{}, + }, + { + name: "summary does not expire because of quantile addtime", + now: time.Unix(20, 0), + age: 10 * time.Second, + input: []Input{ + { + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "rpc_duration_seconds_sum": 1.0, + "rpc_duration_seconds_count": 1, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.5"}, + map[string]interface{}{ + "rpc_duration_seconds": 10.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.01"}, + map[string]interface{}{ + "rpc_duration_seconds": 1.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + addtime: time.Unix(15, 0), // Recent addtime keeps 
entire metric around + }, + }, + expected: []*dto.MetricFamily{ + { + Name: proto.String("rpc_duration_seconds"), + Help: proto.String(helpString), + Type: dto.MetricType_SUMMARY.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{}, + Summary: &dto.Summary{ + SampleSum: proto.Float64(1), + SampleCount: proto.Uint64(1), + Quantile: []*dto.Quantile{ + { + Quantile: proto.Float64(0.5), + Value: proto.Float64(10), + }, + { + Quantile: proto.Float64(0.01), + Value: proto.Float64(1), + }, + }, + }, + }, + }, + }, + }, + }, { name: "expire based on add time", now: time.Unix(20, 0), @@ -425,3 +636,209 @@ func TestCollectionExpire(t *testing.T) { }) } } + +func TestExportTimestamps(t *testing.T) { + tests := []struct { + name string + now time.Time + age time.Duration + input []Input + expected []*dto.MetricFamily + }{ + { + name: "histogram bucket updates", + now: time.Unix(23, 0), + age: 10 * time.Second, + input: []Input{ + { + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "http_request_duration_seconds_sum": 10.0, + "http_request_duration_seconds_count": 2, + }, + time.Unix(15, 0), + telegraf.Histogram, + ), + addtime: time.Unix(23, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.05"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 1.0, + }, + time.Unix(15, 0), + telegraf.Histogram, + ), + addtime: time.Unix(23, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "+Inf"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 1.0, + }, + time.Unix(15, 0), + telegraf.Histogram, + ), + addtime: time.Unix(23, 0), + }, { + // Next interval + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "http_request_duration_seconds_sum": 20.0, + "http_request_duration_seconds_count": 4, + }, + time.Unix(20, 0), // Updated timestamp + telegraf.Histogram, + ), + addtime: 
time.Unix(23, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.05"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 2.0, + }, + time.Unix(20, 0), // Updated timestamp + telegraf.Histogram, + ), + addtime: time.Unix(23, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "+Inf"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 2.0, + }, + time.Unix(20, 0), // Updated timestamp + telegraf.Histogram, + ), + addtime: time.Unix(23, 0), + }, + }, + expected: []*dto.MetricFamily{ + { + Name: proto.String("http_request_duration_seconds"), + Help: proto.String(helpString), + Type: dto.MetricType_HISTOGRAM.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{}, + TimestampMs: proto.Int64(time.Unix(20, 0).UnixNano() / int64(time.Millisecond)), + Histogram: &dto.Histogram{ + SampleCount: proto.Uint64(4), + SampleSum: proto.Float64(20.0), + Bucket: []*dto.Bucket{ + { + UpperBound: proto.Float64(0.05), + CumulativeCount: proto.Uint64(2), + }, + { + UpperBound: proto.Float64(math.Inf(1)), + CumulativeCount: proto.Uint64(2), + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "summary quantile updates", + now: time.Unix(23, 0), + age: 10 * time.Second, + input: []Input{ + { + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "rpc_duration_seconds_sum": 1.0, + "rpc_duration_seconds_count": 1, + }, + time.Unix(15, 0), + telegraf.Summary, + ), + addtime: time.Unix(23, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.01"}, + map[string]interface{}{ + "rpc_duration_seconds": 1.0, + }, + time.Unix(15, 0), + telegraf.Summary, + ), + addtime: time.Unix(23, 0), + }, { + // Updated Summary + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "rpc_duration_seconds_sum": 2.0, + "rpc_duration_seconds_count": 2, + }, + time.Unix(20, 0), // 
Updated timestamp + telegraf.Summary, + ), + addtime: time.Unix(23, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.01"}, + map[string]interface{}{ + "rpc_duration_seconds": 2.0, + }, + time.Unix(20, 0), // Updated timestamp + telegraf.Summary, + ), + addtime: time.Unix(23, 0), + }, + }, + expected: []*dto.MetricFamily{ + { + Name: proto.String("rpc_duration_seconds"), + Help: proto.String(helpString), + Type: dto.MetricType_SUMMARY.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{}, + TimestampMs: proto.Int64(time.Unix(20, 0).UnixNano() / int64(time.Millisecond)), + Summary: &dto.Summary{ + SampleCount: proto.Uint64(2), + SampleSum: proto.Float64(2.0), + Quantile: []*dto.Quantile{ + { + Quantile: proto.Float64(0.01), + Value: proto.Float64(2), + }, + }, + }, + }, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := NewCollection(FormatConfig{TimestampExport: ExportTimestamp}) + for _, item := range tt.input { + c.Add(item.metric, item.addtime) + } + c.Expire(tt.now, tt.age) + + actual := c.GetProto() + + require.Equal(t, tt.expected, actual) + }) + } +} From 04c3e9bb24feb36f24e8da75f6b764e44d6e58cf Mon Sep 17 00:00:00 2001 From: Matteo Concas Date: Thu, 2 Sep 2021 16:57:17 +0200 Subject: [PATCH 031/176] feat: Add rocm_smi input to monitor AMD GPUs (#9602) --- etc/telegraf.conf | 15 +- plugins/inputs/all/all.go | 1 + plugins/inputs/amd_rocm_smi/README.md | 58 ++++ plugins/inputs/amd_rocm_smi/amd_rocm_smi.go | 294 ++++++++++++++++++ .../inputs/amd_rocm_smi/amd_rocm_smi_test.go | 90 ++++++ .../amd_rocm_smi/testdata/vega-10-XT.json | 77 +++++ .../testdata/vega-20-WKS-GL-XE.json | 165 ++++++++++ 7 files changed, 696 insertions(+), 4 deletions(-) create mode 100644 plugins/inputs/amd_rocm_smi/README.md create mode 100644 plugins/inputs/amd_rocm_smi/amd_rocm_smi.go create mode 100644 plugins/inputs/amd_rocm_smi/amd_rocm_smi_test.go create mode 100644 
plugins/inputs/amd_rocm_smi/testdata/vega-10-XT.json create mode 100644 plugins/inputs/amd_rocm_smi/testdata/vega-20-WKS-GL-XE.json diff --git a/etc/telegraf.conf b/etc/telegraf.conf index c49761c947bc4..43b1f8f3ade45 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -1870,7 +1870,7 @@ # ## Print Warp 10 error body # # print_error_body = false # -# ## Max string error size +# ## Max string error size # # max_string_error_size = 511 # # ## Optional TLS Config @@ -4343,19 +4343,19 @@ # ## List of metrics collected on above servers # ## Each metric consists in a name, a jmx path and either # ## a pass or drop slice attribute. -# ## This collect all heap memory usage metrics. +# ## This collect all heap memory usage metrics. # [[inputs.jolokia.metrics]] # name = "heap_memory_usage" # mbean = "java.lang:type=Memory" # attribute = "HeapMemoryUsage" # -# ## This collect thread counts metrics. +# ## This collect thread counts metrics. # [[inputs.jolokia.metrics]] # name = "thread_count" # mbean = "java.lang:type=Threading" # attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount" # -# ## This collect number of class loaded/unloaded counts metrics. +# ## This collect number of class loaded/unloaded counts metrics. 
# [[inputs.jolokia.metrics]] # name = "class_count" # mbean = "java.lang:type=ClassLoading" @@ -5785,6 +5785,13 @@ # # Specify a list of one or more riak http servers # servers = ["http://localhost:8098"] +# # Query statistics from AMD Graphics cards using rocm-smi binary +# [[inputs.amd_rocm_smi]] +# ## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath +# # bin_path = "/opt/rocm/bin/rocm-smi" +# +# ## Optional: timeout for GPU polling +# # timeout = "5s" # # Read API usage and limits for a Salesforce organisation # [[inputs.salesforce]] diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 350a8cca08cdb..781e04e60928b 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -5,6 +5,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/activemq" _ "github.com/influxdata/telegraf/plugins/inputs/aerospike" _ "github.com/influxdata/telegraf/plugins/inputs/aliyuncms" + _ "github.com/influxdata/telegraf/plugins/inputs/amd_rocm_smi" _ "github.com/influxdata/telegraf/plugins/inputs/amqp_consumer" _ "github.com/influxdata/telegraf/plugins/inputs/apache" _ "github.com/influxdata/telegraf/plugins/inputs/apcupsd" diff --git a/plugins/inputs/amd_rocm_smi/README.md b/plugins/inputs/amd_rocm_smi/README.md new file mode 100644 index 0000000000000..89a5b063065d7 --- /dev/null +++ b/plugins/inputs/amd_rocm_smi/README.md @@ -0,0 +1,58 @@ +# ROCm System Management Interface (SMI) Input Plugin + +This plugin uses a query on the [`rocm-smi`](https://github.com/RadeonOpenCompute/rocm_smi_lib/tree/master/python_smi_tools) binary to pull GPU stats including memory and GPU usage, temperatures and other. 
+
+### Configuration
+
+```toml
+# Query statistics from AMD Graphics cards using rocm-smi binary
+[[inputs.amd_rocm_smi]]
+  ## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath
+  # bin_path = "/opt/rocm/bin/rocm-smi"
+
+  ## Optional: timeout for GPU polling
+  # timeout = "5s"
+```
+
+### Metrics
+- measurement: `amd_rocm_smi`
+  - tags
+    - `name` (entry name assigned by rocm-smi executable)
+    - `gpu_id` (id of the GPU according to rocm-smi)
+    - `gpu_unique_id` (unique id of the GPU)
+
+  - fields
+    - `driver_version` (integer)
+    - `fan_speed`(integer)
+    - `memory_total`(integer B)
+    - `memory_used`(integer B)
+    - `memory_free`(integer B)
+    - `temperature_sensor_edge` (float, Celsius)
+    - `temperature_sensor_junction` (float, Celsius)
+    - `temperature_sensor_memory` (float, Celsius)
+    - `utilization_gpu` (integer, percentage)
+    - `utilization_memory` (integer, percentage)
+    - `clocks_current_sm` (integer, Mhz)
+    - `clocks_current_memory` (integer, Mhz)
+    - `power_draw` (float, Watt)
+
+### Troubleshooting
+Check the full output by running `rocm-smi` binary manually.
+
+Linux:
+```sh
+rocm-smi -o -l -m -M -g -c -t -u -i -f -p -P -s -S -v --showreplaycount --showpids --showdriverversion --showmemvendor --showfwinfo --showproductname --showserial --showuniqueid --showbus --showpendingpages --showpagesinfo --showretiredpages --showunreservablepages --showmemuse --showvoltage --showtopo --showtopoweight --showtopohops --showtopotype --showtoponuma --showmeminfo all --json
+```
+Please include the output of this command if opening a GitHub issue, together with ROCm version. 
+### Example Output +``` +amd_rocm_smi,gpu_id=0x6861,gpu_unique_id=0x2150e7d042a1124,host=ali47xl,name=card0 clocks_current_memory=167i,clocks_current_sm=852i,driver_version=51114i,fan_speed=14i,memory_free=17145282560i,memory_total=17163091968i,memory_used=17809408i,power_draw=7,temperature_sensor_edge=28,temperature_sensor_junction=29,temperature_sensor_memory=92,utilization_gpu=0i 1630572551000000000 +amd_rocm_smi,gpu_id=0x6861,gpu_unique_id=0x2150e7d042a1124,host=ali47xl,name=card0 clocks_current_memory=167i,clocks_current_sm=852i,driver_version=51114i,fan_speed=14i,memory_free=17145282560i,memory_total=17163091968i,memory_used=17809408i,power_draw=7,temperature_sensor_edge=29,temperature_sensor_junction=30,temperature_sensor_memory=91,utilization_gpu=0i 1630572701000000000 +amd_rocm_smi,gpu_id=0x6861,gpu_unique_id=0x2150e7d042a1124,host=ali47xl,name=card0 clocks_current_memory=167i,clocks_current_sm=852i,driver_version=51114i,fan_speed=14i,memory_free=17145282560i,memory_total=17163091968i,memory_used=17809408i,power_draw=7,temperature_sensor_edge=29,temperature_sensor_junction=29,temperature_sensor_memory=92,utilization_gpu=0i 1630572749000000000 +``` +### Limitations and notices +Please notice that this plugin has been developed and tested on a limited number of versions and small set of GPUs. Currently the latest ROCm version tested is 4.3.0. +Notice that depending on the device and driver versions the amount of information provided by `rocm-smi` can vary so that some fields would start/stop appearing in the metrics upon updates. +The `rocm-smi` JSON output is not perfectly homogeneous and is possibly changing in the future, hence parsing and unmarshaling can start failing upon updating ROCm. + +Inspired by the current state of the art of the `nvidia-smi` plugin. 
diff --git a/plugins/inputs/amd_rocm_smi/amd_rocm_smi.go b/plugins/inputs/amd_rocm_smi/amd_rocm_smi.go new file mode 100644 index 0000000000000..7fdd32f466b73 --- /dev/null +++ b/plugins/inputs/amd_rocm_smi/amd_rocm_smi.go @@ -0,0 +1,294 @@ +package amd_rocm_smi + +import ( + "encoding/json" + "fmt" + "os" + "os/exec" + "strconv" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" +) + +const measurement = "amd_rocm_smi" + +type ROCmSMI struct { + BinPath string + Timeout config.Duration +} + +// Description returns the description of the ROCmSMI plugin +func (rsmi *ROCmSMI) Description() string { + return "Query statistics from AMD Graphics cards using rocm-smi binary" +} + +var ROCmSMIConfig = ` +## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath +# bin_path = "/opt/rocm/bin/rocm-smi" + +## Optional: timeout for GPU polling +# timeout = "5s" +` + +// SampleConfig returns the sample configuration for the ROCmSMI plugin +func (rsmi *ROCmSMI) SampleConfig() string { + return ROCmSMIConfig +} + +// Gather implements the telegraf interface +func (rsmi *ROCmSMI) Gather(acc telegraf.Accumulator) error { + if _, err := os.Stat(rsmi.BinPath); os.IsNotExist(err) { + return fmt.Errorf("rocm-smi binary not found in path %s, cannot query GPUs statistics", rsmi.BinPath) + } + + data, err := rsmi.pollROCmSMI() + if err != nil { + return err + } + + err = gatherROCmSMI(data, acc) + if err != nil { + return err + } + + return nil +} + +func init() { + inputs.Add("amd_rocm_smi", func() telegraf.Input { + return &ROCmSMI{ + BinPath: "/opt/rocm/bin/rocm-smi", + Timeout: config.Duration(5 * time.Second), + } + }) +} + +func (rsmi *ROCmSMI) pollROCmSMI() ([]byte, error) { + // Construct and execute metrics query, there currently exist (ROCm v4.3.x) a "-a" option + // that does not provide all the information, so each 
needed parameter is set manually + cmd := exec.Command(rsmi.BinPath, + "-o", + "-l", + "-m", + "-M", + "-g", + "-c", + "-t", + "-u", + "-i", + "-f", + "-p", + "-P", + "-s", + "-S", + "-v", + "--showreplaycount", + "--showpids", + "--showdriverversion", + "--showmemvendor", + "--showfwinfo", + "--showproductname", + "--showserial", + "--showuniqueid", + "--showbus", + "--showpendingpages", + "--showpagesinfo", + "--showmeminfo", + "all", + "--showretiredpages", + "--showunreservablepages", + "--showmemuse", + "--showvoltage", + "--showtopo", + "--showtopoweight", + "--showtopohops", + "--showtopotype", + "--showtoponuma", + "--json") + + ret, _ := internal.StdOutputTimeout(cmd, + time.Duration(rsmi.Timeout)) + return ret, nil +} + +func gatherROCmSMI(ret []byte, acc telegraf.Accumulator) error { + var gpus map[string]GPU + var sys map[string]sysInfo + + err1 := json.Unmarshal(ret, &gpus) + if err1 != nil { + return err1 + } + + err2 := json.Unmarshal(ret, &sys) + if err2 != nil { + return err2 + } + + metrics := genTagsFields(gpus, sys) + for _, metric := range metrics { + acc.AddFields(measurement, metric.fields, metric.tags) + } + + return nil +} + +type metric struct { + tags map[string]string + fields map[string]interface{} +} + +func genTagsFields(gpus map[string]GPU, system map[string]sysInfo) []metric { + metrics := []metric{} + for cardID, payload := range gpus { + if strings.Contains(cardID, "card") { + tags := map[string]string{ + "name": cardID, + } + fields := map[string]interface{}{} + + totVRAM, _ := strconv.ParseInt(payload.GpuVRAMTotalMemory, 10, 64) + usdVRAM, _ := strconv.ParseInt(payload.GpuVRAMTotalUsedMemory, 10, 64) + strFree := strconv.FormatInt(totVRAM-usdVRAM, 10) + + setTagIfUsed(tags, "gpu_id", payload.GpuID) + setTagIfUsed(tags, "gpu_unique_id", payload.GpuUniqueID) + + setIfUsed("int", fields, "driver_version", strings.Replace(system["system"].DriverVersion, ".", "", -1)) + setIfUsed("int", fields, "fan_speed", 
payload.GpuFanSpeedPercentage) + setIfUsed("int64", fields, "memory_total", payload.GpuVRAMTotalMemory) + setIfUsed("int64", fields, "memory_used", payload.GpuVRAMTotalUsedMemory) + setIfUsed("int64", fields, "memory_free", strFree) + setIfUsed("float", fields, "temperature_sensor_edge", payload.GpuTemperatureSensorEdge) + setIfUsed("float", fields, "temperature_sensor_junction", payload.GpuTemperatureSensorJunction) + setIfUsed("float", fields, "temperature_sensor_memory", payload.GpuTemperatureSensorMemory) + setIfUsed("int", fields, "utilization_gpu", payload.GpuUsePercentage) + setIfUsed("int", fields, "utilization_memory", payload.GpuMemoryUsePercentage) + setIfUsed("int", fields, "clocks_current_sm", strings.Trim(payload.GpuSclkClockSpeed, "(Mhz)")) + setIfUsed("int", fields, "clocks_current_memory", strings.Trim(payload.GpuMclkClockSpeed, "(Mhz)")) + setIfUsed("float", fields, "power_draw", payload.GpuAveragePower) + + metrics = append(metrics, metric{tags, fields}) + } + } + return metrics +} + +func setTagIfUsed(m map[string]string, k, v string) { + if v != "" { + m[k] = v + } +} + +func setIfUsed(t string, m map[string]interface{}, k, v string) { + vals := strings.Fields(v) + if len(vals) < 1 { + return + } + + val := vals[0] + + switch t { + case "float": + if val != "" { + f, err := strconv.ParseFloat(val, 64) + if err == nil { + m[k] = f + } + } + case "int": + if val != "" { + i, err := strconv.Atoi(val) + if err == nil { + m[k] = i + } + } + case "int64": + if val != "" { + i, err := strconv.ParseInt(val, 10, 64) + if err == nil { + m[k] = i + } + } + case "str": + if val != "" { + m[k] = val + } + } +} + +type sysInfo struct { + DriverVersion string `json:"Driver version"` +} + +type GPU struct { + GpuID string `json:"GPU ID"` + GpuUniqueID string `json:"Unique ID"` + GpuVBIOSVersion string `json:"VBIOS version"` + GpuTemperatureSensorEdge string `json:"Temperature (Sensor edge) (C)"` + GpuTemperatureSensorJunction string `json:"Temperature (Sensor 
junction) (C)"` + GpuTemperatureSensorMemory string `json:"Temperature (Sensor memory) (C)"` + GpuDcefClkClockSpeed string `json:"dcefclk clock speed"` + GpuDcefClkClockLevel string `json:"dcefclk clock level"` + GpuFclkClockSpeed string `json:"fclk clock speed"` + GpuFclkClockLevel string `json:"fclk clock level"` + GpuMclkClockSpeed string `json:"mclk clock speed:"` + GpuMclkClockLevel string `json:"mclk clock level:"` + GpuSclkClockSpeed string `json:"sclk clock speed:"` + GpuSclkClockLevel string `json:"sclk clock level:"` + GpuSocclkClockSpeed string `json:"socclk clock speed"` + GpuSocclkClockLevel string `json:"socclk clock level"` + GpuPcieClock string `json:"pcie clock level"` + GpuFanSpeedLevel string `json:"Fan speed (level)"` + GpuFanSpeedPercentage string `json:"Fan speed (%)"` + GpuFanRPM string `json:"Fan RPM"` + GpuPerformanceLevel string `json:"Performance Level"` + GpuOverdrive string `json:"GPU OverDrive value (%)"` + GpuMaxPower string `json:"Max Graphics Package Power (W)"` + GpuAveragePower string `json:"Average Graphics Package Power (W)"` + GpuUsePercentage string `json:"GPU use (%)"` + GpuMemoryUsePercentage string `json:"GPU memory use (%)"` + GpuMemoryVendor string `json:"GPU memory vendor"` + GpuPCIeReplay string `json:"PCIe Replay Count"` + GpuSerialNumber string `json:"Serial Number"` + GpuVoltagemV string `json:"Voltage (mV)"` + GpuPCIBus string `json:"PCI Bus"` + GpuASDDirmware string `json:"ASD firmware version"` + GpuCEFirmware string `json:"CE firmware version"` + GpuDMCUFirmware string `json:"DMCU firmware version"` + GpuMCFirmware string `json:"MC firmware version"` + GpuMEFirmware string `json:"ME firmware version"` + GpuMECFirmware string `json:"MEC firmware version"` + GpuMEC2Firmware string `json:"MEC2 firmware version"` + GpuPFPFirmware string `json:"PFP firmware version"` + GpuRLCFirmware string `json:"RLC firmware version"` + GpuRLCSRLC string `json:"RLC SRLC firmware version"` + GpuRLCSRLG string `json:"RLC SRLG firmware 
version"` + GpuRLCSRLS string `json:"RLC SRLS firmware version"` + GpuSDMAFirmware string `json:"SDMA firmware version"` + GpuSDMA2Firmware string `json:"SDMA2 firmware version"` + GpuSMCFirmware string `json:"SMC firmware version"` + GpuSOSFirmware string `json:"SOS firmware version"` + GpuTARAS string `json:"TA RAS firmware version"` + GpuTAXGMI string `json:"TA XGMI firmware version"` + GpuUVDFirmware string `json:"UVD firmware version"` + GpuVCEFirmware string `json:"VCE firmware version"` + GpuVCNFirmware string `json:"VCN firmware version"` + GpuCardSeries string `json:"Card series"` + GpuCardModel string `json:"Card model"` + GpuCardVendor string `json:"Card vendor"` + GpuCardSKU string `json:"Card SKU"` + GpuNUMANode string `json:"(Topology) Numa Node"` + GpuNUMAAffinity string `json:"(Topology) Numa Affinity"` + GpuVisVRAMTotalMemory string `json:"VIS_VRAM Total Memory (B)"` + GpuVisVRAMTotalUsedMemory string `json:"VIS_VRAM Total Used Memory (B)"` + GpuVRAMTotalMemory string `json:"VRAM Total Memory (B)"` + GpuVRAMTotalUsedMemory string `json:"VRAM Total Used Memory (B)"` + GpuGTTTotalMemory string `json:"GTT Total Memory (B)"` + GpuGTTTotalUsedMemory string `json:"GTT Total Used Memory (B)"` +} diff --git a/plugins/inputs/amd_rocm_smi/amd_rocm_smi_test.go b/plugins/inputs/amd_rocm_smi/amd_rocm_smi_test.go new file mode 100644 index 0000000000000..7893760bdf952 --- /dev/null +++ b/plugins/inputs/amd_rocm_smi/amd_rocm_smi_test.go @@ -0,0 +1,90 @@ +package amd_rocm_smi + +import ( + "io/ioutil" + "path/filepath" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestGatherValidJSON(t *testing.T) { + tests := []struct { + name string + filename string + expected []telegraf.Metric + }{ + { + name: "Vega 10 XT", + filename: "vega-10-XT.json", + expected: []telegraf.Metric{ + testutil.MustMetric( + "amd_rocm_smi", + map[string]string{ + "gpu_id": "0x6861", + 
"gpu_unique_id": "0x2150e7d042a1124", + "name": "card0", + }, + map[string]interface{}{ + "driver_version": 5925, + "fan_speed": 13, + "memory_total": int64(17163091968), + "memory_used": int64(17776640), + "memory_free": int64(17145315328), + "temperature_sensor_edge": 39.0, + "temperature_sensor_junction": 40.0, + "temperature_sensor_memory": 92.0, + "utilization_gpu": 0, + "clocks_current_sm": 1269, + "clocks_current_memory": 167, + "power_draw": 15.0, + }, + time.Unix(0, 0)), + }, + }, + { + name: "Vega 20 WKS GL-XE [Radeon Pro VII]", + filename: "vega-20-WKS-GL-XE.json", + expected: []telegraf.Metric{ + testutil.MustMetric( + "amd_rocm_smi", + map[string]string{ + "gpu_id": "0x66a1", + "gpu_unique_id": "0x2f048617326b1ea", + "name": "card0", + }, + map[string]interface{}{ + "driver_version": 5917, + "fan_speed": 0, + "memory_total": int64(34342961152), + "memory_used": int64(10850304), + "memory_free": int64(34332110848), + "temperature_sensor_edge": 36.0, + "temperature_sensor_junction": 38.0, + "temperature_sensor_memory": 35.0, + "utilization_gpu": 0, + "utilization_memory": 0, + "clocks_current_sm": 1725, + "clocks_current_memory": 1000, + "power_draw": 26.0, + }, + time.Unix(0, 0)), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var acc testutil.Accumulator + octets, err := ioutil.ReadFile(filepath.Join("testdata", tt.filename)) + require.NoError(t, err) + + err = gatherROCmSMI(octets, &acc) + require.NoError(t, err) + + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) + }) + } +} diff --git a/plugins/inputs/amd_rocm_smi/testdata/vega-10-XT.json b/plugins/inputs/amd_rocm_smi/testdata/vega-10-XT.json new file mode 100644 index 0000000000000..c4d51f5253a51 --- /dev/null +++ b/plugins/inputs/amd_rocm_smi/testdata/vega-10-XT.json @@ -0,0 +1,77 @@ +{ + "card0": { + "GPU ID": "0x6861", + "Unique ID": "0x2150e7d042a1124", + "VBIOS version": "113-D0510100-106", + "Temperature (Sensor 
edge) (C)": "39.0", + "Temperature (Sensor junction) (C)": "40.0", + "Temperature (Sensor memory) (C)": "92.0", + "dcefclk clock speed:": "(600Mhz)", + "dcefclk clock level:": "0", + "mclk clock speed:": "(167Mhz)", + "mclk clock level:": "0", + "sclk clock speed:": "(1269Mhz)", + "sclk clock level:": "3", + "socclk clock speed:": "(960Mhz)", + "socclk clock level:": "3", + "pcie clock level": "1 (8.0GT/s x16)", + "sclk clock level": "3 (1269Mhz)", + "Fan speed (level)": "33", + "Fan speed (%)": "13", + "Fan RPM": "682", + "Performance Level": "auto", + "GPU OverDrive value (%)": "0", + "GPU Memory OverDrive value (%)": "0", + "Max Graphics Package Power (W)": "170.0", + "Average Graphics Package Power (W)": "15.0", + "0": "8.0GT/s x16", + "1": "8.0GT/s x16 *", + "2": "847Mhz", + "3": "960Mhz *", + "4": "1028Mhz", + "5": "1107Mhz", + "6": "1440Mhz", + "7": "1500Mhz", + "GPU use (%)": "0", + "GPU memory vendor": "samsung", + "PCIe Replay Count": "0", + "Serial Number": "N/A", + "Voltage (mV)": "906", + "PCI Bus": "0000:04:00.0", + "VRAM Total Memory (B)": "17163091968", + "VRAM Total Used Memory (B)": "17776640", + "VIS_VRAM Total Memory (B)": "268435456", + "VIS_VRAM Total Used Memory (B)": "13557760", + "GTT Total Memory (B)": "17163091968", + "GTT Total Used Memory (B)": "25608192", + "ASD firmware version": "553648152", + "CE firmware version": "79", + "DMCU firmware version": "0", + "MC firmware version": "0", + "ME firmware version": "163", + "MEC firmware version": "432", + "MEC2 firmware version": "432", + "PFP firmware version": "186", + "RLC firmware version": "93", + "RLC SRLC firmware version": "0", + "RLC SRLG firmware version": "0", + "RLC SRLS firmware version": "0", + "SDMA firmware version": "430", + "SDMA2 firmware version": "430", + "SMC firmware version": "00.28.54.00", + "SOS firmware version": "0x0008015d", + "TA RAS firmware version": "00.00.00.00", + "TA XGMI firmware version": "00.00.00.00", + "UVD firmware version": "0x422b1100", + "VCE 
firmware version": "0x39060400", + "VCN firmware version": "0x00000000", + "Card model": "0xc1e", + "Card vendor": "Advanced Micro Devices, Inc. [AMD/ATI]", + "Card SKU": "D05101", + "(Topology) Numa Node": "0", + "(Topology) Numa Affinity": "0" + }, + "system": { + "Driver version": "5.9.25" + } +} \ No newline at end of file diff --git a/plugins/inputs/amd_rocm_smi/testdata/vega-20-WKS-GL-XE.json b/plugins/inputs/amd_rocm_smi/testdata/vega-20-WKS-GL-XE.json new file mode 100644 index 0000000000000..771565a607bd5 --- /dev/null +++ b/plugins/inputs/amd_rocm_smi/testdata/vega-20-WKS-GL-XE.json @@ -0,0 +1,165 @@ +{ + "card0": { + "GPU ID": "0x66a1", + "Unique ID": "0x2f048617326b1ea", + "VBIOS version": "113-D1631700-111", + "Temperature (Sensor edge) (C)": "36.0", + "Temperature (Sensor junction) (C)": "38.0", + "Temperature (Sensor memory) (C)": "35.0", + "dcefclk clock speed:": "(357Mhz)", + "dcefclk clock level:": "0", + "fclk clock speed:": "(1080Mhz)", + "fclk clock level:": "6", + "mclk clock speed:": "(1000Mhz)", + "mclk clock level:": "2", + "sclk clock speed:": "(1725Mhz)", + "sclk clock level:": "8", + "socclk clock speed:": "(971Mhz)", + "socclk clock level:": "7", + "pcie clock level": "1 (16.0GT/s x16)", + "sclk clock level": "8 (1725Mhz)", + "Fan speed (level)": "0", + "Fan speed (%)": "0", + "Fan RPM": "0", + "Performance Level": "high", + "GPU OverDrive value (%)": "0", + "Max Graphics Package Power (W)": "225.0", + "Average Graphics Package Power (W)": "26.0", + "0": "2.5GT/s x16", + "1": "16.0GT/s x16 *", + "2": "566Mhz", + "3": "618Mhz", + "4": "680Mhz", + "5": "755Mhz", + "6": "850Mhz", + "7": "971Mhz *", + "8": "1725Mhz *", + "GPU use (%)": "0", + "GPU memory use (%)": "0", + "GPU memory vendor": "samsung", + "PCIe Replay Count": "0", + "Serial Number": "692024000810", + "Voltage (mV)": "1000", + "PCI Bus": "0000:63:00.0", + "VRAM Total Memory (B)": "34342961152", + "VRAM Total Used Memory (B)": "10850304", + "VIS_VRAM Total Memory (B)": 
"34342961152", + "VIS_VRAM Total Used Memory (B)": "10850304", + "GTT Total Memory (B)": "54974742528", + "GTT Total Used Memory (B)": "11591680", + "ASD firmware version": "553648199", + "CE firmware version": "79", + "DMCU firmware version": "0", + "MC firmware version": "0", + "ME firmware version": "164", + "MEC firmware version": "448", + "MEC2 firmware version": "448", + "PFP firmware version": "188", + "RLC firmware version": "50", + "RLC SRLC firmware version": "1", + "RLC SRLG firmware version": "1", + "RLC SRLS firmware version": "1", + "SDMA firmware version": "144", + "SDMA2 firmware version": "144", + "SMC firmware version": "00.40.59.00", + "SOS firmware version": "0x00080b67", + "TA RAS firmware version": "27.00.01.36", + "TA XGMI firmware version": "32.00.00.02", + "UVD firmware version": "0x42002b13", + "VCE firmware version": "0x39060400", + "VCN firmware version": "0x00000000", + "Card series": "Radeon Instinct MI50 32GB", + "Card model": "0x834", + "Card vendor": "Advanced Micro Devices, Inc. 
[AMD/ATI]", + "Card SKU": "D16317", + "(Topology) Numa Node": "0", + "(Topology) Numa Affinity": "0" + }, + "system": { + "Driver version": "5.9.17", + "(Topology) Weight between DRM devices 0 and 1": "40", + "(Topology) Weight between DRM devices 0 and 2": "40", + "(Topology) Weight between DRM devices 0 and 3": "40", + "(Topology) Weight between DRM devices 0 and 4": "72", + "(Topology) Weight between DRM devices 0 and 5": "72", + "(Topology) Weight between DRM devices 0 and 6": "72", + "(Topology) Weight between DRM devices 0 and 7": "72", + "(Topology) Weight between DRM devices 1 and 2": "40", + "(Topology) Weight between DRM devices 1 and 3": "40", + "(Topology) Weight between DRM devices 1 and 4": "72", + "(Topology) Weight between DRM devices 1 and 5": "72", + "(Topology) Weight between DRM devices 1 and 6": "72", + "(Topology) Weight between DRM devices 1 and 7": "72", + "(Topology) Weight between DRM devices 2 and 3": "40", + "(Topology) Weight between DRM devices 2 and 4": "72", + "(Topology) Weight between DRM devices 2 and 5": "72", + "(Topology) Weight between DRM devices 2 and 6": "72", + "(Topology) Weight between DRM devices 2 and 7": "72", + "(Topology) Weight between DRM devices 3 and 4": "72", + "(Topology) Weight between DRM devices 3 and 5": "72", + "(Topology) Weight between DRM devices 3 and 6": "72", + "(Topology) Weight between DRM devices 3 and 7": "72", + "(Topology) Weight between DRM devices 4 and 5": "40", + "(Topology) Weight between DRM devices 4 and 6": "40", + "(Topology) Weight between DRM devices 4 and 7": "40", + "(Topology) Weight between DRM devices 5 and 6": "40", + "(Topology) Weight between DRM devices 5 and 7": "40", + "(Topology) Weight between DRM devices 6 and 7": "40", + "(Topology) Hops between DRM devices 0 and 1": "2", + "(Topology) Hops between DRM devices 0 and 2": "2", + "(Topology) Hops between DRM devices 0 and 3": "2", + "(Topology) Hops between DRM devices 0 and 4": "3", + "(Topology) Hops between DRM 
devices 0 and 5": "3", + "(Topology) Hops between DRM devices 0 and 6": "3", + "(Topology) Hops between DRM devices 0 and 7": "3", + "(Topology) Hops between DRM devices 1 and 2": "2", + "(Topology) Hops between DRM devices 1 and 3": "2", + "(Topology) Hops between DRM devices 1 and 4": "3", + "(Topology) Hops between DRM devices 1 and 5": "3", + "(Topology) Hops between DRM devices 1 and 6": "3", + "(Topology) Hops between DRM devices 1 and 7": "3", + "(Topology) Hops between DRM devices 2 and 3": "2", + "(Topology) Hops between DRM devices 2 and 4": "3", + "(Topology) Hops between DRM devices 2 and 5": "3", + "(Topology) Hops between DRM devices 2 and 6": "3", + "(Topology) Hops between DRM devices 2 and 7": "3", + "(Topology) Hops between DRM devices 3 and 4": "3", + "(Topology) Hops between DRM devices 3 and 5": "3", + "(Topology) Hops between DRM devices 3 and 6": "3", + "(Topology) Hops between DRM devices 3 and 7": "3", + "(Topology) Hops between DRM devices 4 and 5": "2", + "(Topology) Hops between DRM devices 4 and 6": "2", + "(Topology) Hops between DRM devices 4 and 7": "2", + "(Topology) Hops between DRM devices 5 and 6": "2", + "(Topology) Hops between DRM devices 5 and 7": "2", + "(Topology) Hops between DRM devices 6 and 7": "2", + "(Topology) Link type between DRM devices 0 and 1": "PCIE", + "(Topology) Link type between DRM devices 0 and 2": "PCIE", + "(Topology) Link type between DRM devices 0 and 3": "PCIE", + "(Topology) Link type between DRM devices 0 and 4": "PCIE", + "(Topology) Link type between DRM devices 0 and 5": "PCIE", + "(Topology) Link type between DRM devices 0 and 6": "PCIE", + "(Topology) Link type between DRM devices 0 and 7": "PCIE", + "(Topology) Link type between DRM devices 1 and 2": "PCIE", + "(Topology) Link type between DRM devices 1 and 3": "PCIE", + "(Topology) Link type between DRM devices 1 and 4": "PCIE", + "(Topology) Link type between DRM devices 1 and 5": "PCIE", + "(Topology) Link type between DRM devices 1 and 
6": "PCIE", + "(Topology) Link type between DRM devices 1 and 7": "PCIE", + "(Topology) Link type between DRM devices 2 and 3": "PCIE", + "(Topology) Link type between DRM devices 2 and 4": "PCIE", + "(Topology) Link type between DRM devices 2 and 5": "PCIE", + "(Topology) Link type between DRM devices 2 and 6": "PCIE", + "(Topology) Link type between DRM devices 2 and 7": "PCIE", + "(Topology) Link type between DRM devices 3 and 4": "PCIE", + "(Topology) Link type between DRM devices 3 and 5": "PCIE", + "(Topology) Link type between DRM devices 3 and 6": "PCIE", + "(Topology) Link type between DRM devices 3 and 7": "PCIE", + "(Topology) Link type between DRM devices 4 and 5": "PCIE", + "(Topology) Link type between DRM devices 4 and 6": "PCIE", + "(Topology) Link type between DRM devices 4 and 7": "PCIE", + "(Topology) Link type between DRM devices 5 and 6": "PCIE", + "(Topology) Link type between DRM devices 5 and 7": "PCIE", + "(Topology) Link type between DRM devices 6 and 7": "PCIE" + } +} \ No newline at end of file From a86f5997b93b09feaaf9d9697a99445073aa4eed Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Thu, 2 Sep 2021 08:08:24 -0700 Subject: [PATCH 032/176] fix: migrate dgrijalva/jwt-go to golang-jwt/jwt/v4 (#9699) --- docs/LICENSE_OF_DEPENDENCIES.md | 2 +- go.mod | 2 +- go.sum | 4 ++-- plugins/inputs/dcos/client.go | 4 ++-- plugins/inputs/dcos/client_test.go | 2 +- plugins/inputs/dcos/dcos.go | 2 +- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 1ec09fe87f486..a0c62d7f47767 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -60,7 +60,6 @@ following works: - github.com/davecgh/go-spew [ISC License](https://github.com/davecgh/go-spew/blob/master/LICENSE) - github.com/denisenkom/go-mssqldb [BSD 3-Clause "New" or "Revised" 
License](https://github.com/denisenkom/go-mssqldb/blob/master/LICENSE.txt) - github.com/devigned/tab [MIT License](https://github.com/devigned/tab/blob/master/LICENSE) -- github.com/dgrijalva/jwt-go [MIT License](https://github.com/dgrijalva/jwt-go/blob/master/LICENSE) - github.com/dimchansky/utfbom [Apache License 2.0](https://github.com/dimchansky/utfbom/blob/master/LICENSE) - github.com/docker/distribution [Apache License 2.0](https://github.com/docker/distribution/blob/master/LICENSE) - github.com/docker/docker [Apache License 2.0](https://github.com/docker/docker/blob/master/LICENSE) @@ -86,6 +85,7 @@ following works: - github.com/gofrs/uuid [MIT License](https://github.com/gofrs/uuid/blob/master/LICENSE) - github.com/gogo/googleapis [Apache License 2.0](https://github.com/gogo/googleapis/blob/master/LICENSE) - github.com/gogo/protobuf [BSD 3-Clause Clear License](https://github.com/gogo/protobuf/blob/master/LICENSE) +- github.com/golang-jwt/jwt [MIT License](https://github.com/golang-jwt/jwt/blob/master/LICENSE) - github.com/golang-sql/civil [Apache License 2.0](https://github.com/golang-sql/civil/blob/master/LICENSE) - github.com/golang/geo [Apache License 2.0](https://github.com/golang/geo/blob/master/LICENSE) - github.com/golang/groupcache [Apache License 2.0](https://github.com/golang/groupcache/blob/master/LICENSE) diff --git a/go.mod b/go.mod index ff441b60264d1..1d819d87bedae 100644 --- a/go.mod +++ b/go.mod @@ -79,7 +79,6 @@ require ( github.com/denisenkom/go-mssqldb v0.10.0 github.com/devigned/tab v0.1.1 // indirect github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect - github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 github.com/dimchansky/utfbom v1.1.1 github.com/docker/distribution v2.7.1+incompatible // indirect github.com/docker/docker v20.10.6+incompatible @@ -108,6 +107,7 @@ require ( github.com/gofrs/uuid v3.3.0+incompatible github.com/gogo/googleapis v1.4.0 // indirect github.com/gogo/protobuf v1.3.2 + github.com/golang-jwt/jwt/v4 v4.0.0 
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe // indirect github.com/golang/geo v0.0.0-20190916061304-5b978397cfec github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect diff --git a/go.sum b/go.sum index d797edf56c569..93110f3928db3 100644 --- a/go.sum +++ b/go.sum @@ -460,8 +460,6 @@ github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mz github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 h1:CaO/zOnF8VvUfEbhRatPcwKVWamvbYd8tQGRWacE9kU= -github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/wT555ZqwoCS+pk3p6ry4= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/digitalocean/godo v1.42.1/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= @@ -706,6 +704,8 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= +github.com/golang-jwt/jwt/v4 v4.0.0 h1:RAqyYixv1p7uEnocuy8P1nru5wprCh/MH2BIlW5z5/o= +github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod 
h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= diff --git a/plugins/inputs/dcos/client.go b/plugins/inputs/dcos/client.go index fcb976e311ccf..08943d13db0f9 100644 --- a/plugins/inputs/dcos/client.go +++ b/plugins/inputs/dcos/client.go @@ -10,7 +10,7 @@ import ( "net/url" "time" - jwt "github.com/dgrijalva/jwt-go/v4" + jwt "github.com/golang-jwt/jwt/v4" ) const ( @@ -329,7 +329,7 @@ func (c *ClusterClient) createLoginToken(sa *ServiceAccount) (string, error) { UID: sa.AccountID, StandardClaims: jwt.StandardClaims{ // How long we have to login with this token - ExpiresAt: jwt.At(time.Now().Add(5 * time.Minute)), + ExpiresAt: time.Now().Add(time.Minute * 5).Unix(), }, }) return token.SignedString(sa.PrivateKey) diff --git a/plugins/inputs/dcos/client_test.go b/plugins/inputs/dcos/client_test.go index ece4b178f4556..70cf9ce7cfccd 100644 --- a/plugins/inputs/dcos/client_test.go +++ b/plugins/inputs/dcos/client_test.go @@ -8,7 +8,7 @@ import ( "net/url" "testing" - jwt "github.com/dgrijalva/jwt-go/v4" + jwt "github.com/golang-jwt/jwt/v4" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/dcos/dcos.go b/plugins/inputs/dcos/dcos.go index 8fcb321ff36cf..35822f30b074f 100644 --- a/plugins/inputs/dcos/dcos.go +++ b/plugins/inputs/dcos/dcos.go @@ -9,7 +9,7 @@ import ( "sync" "time" - jwt "github.com/dgrijalva/jwt-go/v4" + jwt "github.com/golang-jwt/jwt/v4" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" From 7af4c5fa1537091bc47e7ad3286a70e5c0756bc1 Mon Sep 17 00:00:00 2001 From: reimda Date: Thu, 2 Sep 2021 09:38:43 -0600 Subject: [PATCH 033/176] fix: bump runc to v1.0.0-rc95 to address CVE-2021-30465 (#9713) --- go.mod | 2 +- go.sum | 14 +++++++++++--- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 1d819d87bedae..eb900304fdd9a 100644 --- 
a/go.mod +++ b/go.mod @@ -203,7 +203,7 @@ require ( github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029 github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.1 // indirect - github.com/opencontainers/runc v1.0.0-rc93 // indirect + github.com/opencontainers/runc v1.0.0-rc95 // indirect github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/openzipkin/zipkin-go-opentracing v0.3.4 diff --git a/go.sum b/go.sum index 93110f3928db3..369a49f8412b6 100644 --- a/go.sum +++ b/go.sum @@ -322,12 +322,14 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist 
v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 h1:57RI0wFkG/smvVTcz7F43+R0k+Hvci3jAVQF9lyMoOo= @@ -360,6 +362,7 @@ github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= +github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= @@ -428,6 +431,7 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7 github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.1/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/couchbase/go-couchbase v0.1.0 h1:g4bCvDwRL+ZL6HLhYeRlXxEYP31Wpy0VFxnFw6efEp8= @@ -549,8 +553,9 @@ github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8 github.com/fortytw2/leaktest v1.3.0/go.mod 
h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= -github.com/frankban/quicktest v1.10.2 h1:19ARM85nVi4xH7xPXuc5eM/udya5ieh7b/Sv+d844Tk= github.com/frankban/quicktest v1.10.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= +github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= @@ -685,6 +690,7 @@ github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblf github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE= github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84= @@ -1255,10 +1261,10 @@ github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc9/go.mod 
h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc93 h1:x2UMpOOVf3kQ8arv/EsDGwim8PTNqzL1/EYDr/+scOM= -github.com/opencontainers/runc v1.0.0-rc93 h1:x2UMpOOVf3kQ8arv/EsDGwim8PTNqzL1/EYDr/+scOM= github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= +github.com/opencontainers/runc v1.0.0-rc95 h1:RMuWVfY3E1ILlVsC3RhIq38n4sJtlOFwU9gfFZSqrd0= +github.com/opencontainers/runc v1.0.0-rc95/go.mod h1:z+bZxa/+Tz/FmYVWkhUajJdzFeOqjc5vrqskhVyHGUM= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -1269,6 +1275,7 @@ github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/ github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/selinux v1.6.0/go.mod 
h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= @@ -1914,6 +1921,7 @@ golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= From 5a71f761dcd641491c6092c11024dccd863eedf6 Mon Sep 17 00:00:00 2001 From: alrex Date: Thu, 2 Sep 2021 09:09:05 -0700 Subject: [PATCH 034/176] fix: outputs.opentelemetry use headers config in grpc requests (#9587) --- plugins/outputs/opentelemetry/opentelemetry.go | 5 +++++ plugins/outputs/opentelemetry/opentelemetry_test.go | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/plugins/outputs/opentelemetry/opentelemetry.go b/plugins/outputs/opentelemetry/opentelemetry.go index ea68fbae6323a..a25fe2ff8dae8 100644 --- a/plugins/outputs/opentelemetry/opentelemetry.go +++ b/plugins/outputs/opentelemetry/opentelemetry.go @@ -13,6 +13,7 @@ import ( "go.opentelemetry.io/collector/model/otlpgrpc" "google.golang.org/grpc" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/metadata" ) type OpenTelemetry struct { @@ -160,6 +161,10 @@ func (o *OpenTelemetry) Write(metrics []telegraf.Metric) error { } ctx, cancel := context.WithTimeout(context.Background(), time.Duration(o.Timeout)) + + if len(o.Headers) > 0 { + ctx = metadata.NewOutgoingContext(ctx, metadata.New(o.Headers)) + 
} defer cancel() _, err := o.metricsServiceClient.Export(ctx, md, o.callOptions...) return err diff --git a/plugins/outputs/opentelemetry/opentelemetry_test.go b/plugins/outputs/opentelemetry/opentelemetry_test.go index b61f480978ee4..cfafcd47fc10a 100644 --- a/plugins/outputs/opentelemetry/opentelemetry_test.go +++ b/plugins/outputs/opentelemetry/opentelemetry_test.go @@ -18,6 +18,7 @@ import ( "go.opentelemetry.io/collector/model/otlpgrpc" "go.opentelemetry.io/collector/model/pdata" "google.golang.org/grpc" + "google.golang.org/grpc/metadata" ) func TestOpenTelemetry(t *testing.T) { @@ -43,6 +44,7 @@ func TestOpenTelemetry(t *testing.T) { plugin := &OpenTelemetry{ ServiceAddress: m.Address(), Timeout: config.Duration(time.Second), + Headers: map[string]string{"test": "header1"}, metricsConverter: metricsConverter, grpcClientConn: m.GrpcClient(), metricsServiceClient: otlpgrpc.NewMetricsClient(m.GrpcClient()), @@ -131,5 +133,8 @@ func (m *mockOtelService) Address() string { func (m *mockOtelService) Export(ctx context.Context, request pdata.Metrics) (otlpgrpc.MetricsResponse, error) { m.metrics = request.Clone() + ctxMetadata, ok := metadata.FromIncomingContext(ctx) + assert.Equal(m.t, []string{"header1"}, ctxMetadata.Get("test")) + assert.True(m.t, ok) return otlpgrpc.MetricsResponse{}, nil } From 7de9c5ff279e10edf7fe3fdd596f3b33902c912b Mon Sep 17 00:00:00 2001 From: Alexander Krantz Date: Thu, 2 Sep 2021 09:23:30 -0700 Subject: [PATCH 035/176] fix: bump thrift to 0.14.2 and zipkin-go-opentracing 0.4.5 (#9700) --- docs/LICENSE_OF_DEPENDENCIES.md | 1 - go.mod | 7 +- go.sum | 8 +- .../stress_test_write/stress_test_write.go | 28 +- .../cmd/thrift_serialize/thrift_serialize.go | 19 +- plugins/inputs/zipkin/codec/codec.go | 2 +- plugins/inputs/zipkin/codec/jsonV1/jsonV1.go | 2 +- .../gen-go/zipkincore/GoUnusedProtection__.go | 5 + .../gen-go/zipkincore/zipkinCore-consts.go | 47 + .../thrift/gen-go/zipkincore/zipkinCore.go | 1556 +++++++++++++++++ 
plugins/inputs/zipkin/codec/thrift/thrift.go | 14 +- .../inputs/zipkin/codec/thrift/thrift_test.go | 2 +- 12 files changed, 1654 insertions(+), 37 deletions(-) create mode 100644 plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/GoUnusedProtection__.go create mode 100644 plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore-consts.go create mode 100644 plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore.go diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index a0c62d7f47767..46f8e5ff32793 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -173,7 +173,6 @@ following works: - github.com/opencontainers/go-digest [Apache License 2.0](https://github.com/opencontainers/go-digest/blob/master/LICENSE) - github.com/opencontainers/image-spec [Apache License 2.0](https://github.com/opencontainers/image-spec/blob/master/LICENSE) - github.com/opentracing/opentracing-go [Apache License 2.0](https://github.com/opentracing/opentracing-go/blob/master/LICENSE) -- github.com/openzipkin/zipkin-go-opentracing [MIT License](https://github.com/openzipkin/zipkin-go-opentracing/blob/master/LICENSE) - github.com/philhofer/fwd [MIT License](https://github.com/philhofer/fwd/blob/master/LICENSE.md) - github.com/pierrec/lz4 [BSD 3-Clause "New" or "Revised" License](https://github.com/pierrec/lz4/blob/master/LICENSE) - github.com/pion/dtls [MIT License](https://github.com/pion/dtls/blob/master/LICENSE) diff --git a/go.mod b/go.mod index eb900304fdd9a..8dd6c8f7a6fc4 100644 --- a/go.mod +++ b/go.mod @@ -42,7 +42,7 @@ require ( github.com/antchfx/xmlquery v1.3.5 github.com/antchfx/xpath v1.1.11 github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230 // indirect - github.com/apache/thrift v0.13.0 + github.com/apache/thrift v0.14.2 github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 // indirect github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 
github.com/armon/go-metrics v0.3.3 // indirect @@ -205,8 +205,9 @@ require ( github.com/opencontainers/image-spec v1.0.1 // indirect github.com/opencontainers/runc v1.0.0-rc95 // indirect github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect - github.com/opentracing/opentracing-go v1.2.0 // indirect - github.com/openzipkin/zipkin-go-opentracing v0.3.4 + github.com/opentracing/opentracing-go v1.2.0 + github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5 + github.com/openzipkin/zipkin-go v0.2.5 github.com/philhofer/fwd v1.1.1 // indirect github.com/pierrec/lz4 v2.5.2+incompatible // indirect github.com/pion/dtls/v2 v2.0.9 diff --git a/go.sum b/go.sum index 369a49f8412b6..1d373bad3ce34 100644 --- a/go.sum +++ b/go.sum @@ -221,8 +221,9 @@ github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VT github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230 h1:5ultmol0yeX75oh1hY78uAFn3dupBQ/QUNxERCkiaUQ= github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.14.2 h1:hY4rAyg7Eqbb27GB6gkhUKrRAuc8xRjlNtJq+LseKeY= +github.com/apache/thrift v0.14.2/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 h1:Bmjk+DjIi3tTAU0wxGaFbfjGUqlxxSXARq9A96Kgoos= github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3/go.mod h1:KASm+qXFKs/xjSoWn30NrWBBvdTTQq+UjkhjEJHfSFA= github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 h1:FD4/ikKOFxwP8muWDypbmBWc634+YcAs3eBrYAmRdZY= @@ -1291,12 +1292,13 @@ github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mo 
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5 h1:ZCnq+JUrvXcDVhX/xRolRBZifmabN1HcS1wrPSvxhrU= github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/openzipkin/zipkin-go-opentracing v0.3.4 h1:x/pBv/5VJNWkcHF1G9xqhug8Iw7X1y1zOMzDmyuvP2g= -github.com/openzipkin/zipkin-go-opentracing v0.3.4/go.mod h1:js2AbwmHW0YD9DwIw2JhQWmbfFi/UnWyYwdVhqbCDOE= +github.com/openzipkin/zipkin-go v0.2.5 h1:UwtQQx2pyPIgWYHRg+epgdx1/HnBQTgN3/oIYEJTQzU= +github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= diff --git a/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go b/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go index 3889e2f2cd9ea..a1abccc420ad9 100644 --- a/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go +++ b/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go @@ -24,7 +24,10 @@ import ( "log" "time" - zipkin "github.com/openzipkin/zipkin-go-opentracing" + otlog "github.com/opentracing/opentracing-go/log" + zipkinot 
"github.com/openzipkin-contrib/zipkin-go-opentracing" + "github.com/openzipkin/zipkin-go" + zipkinhttp "github.com/openzipkin/zipkin-go/reporter/http" ) var ( @@ -46,27 +49,30 @@ func init() { func main() { flag.Parse() var hostname = fmt.Sprintf("http://%s:9411/api/v1/spans", ZipkinServerHost) - collector, err := zipkin.NewHTTPCollector( + reporter := zipkinhttp.NewReporter( hostname, - zipkin.HTTPBatchSize(BatchSize), - zipkin.HTTPMaxBacklog(MaxBackLog), - zipkin.HTTPBatchInterval(time.Duration(BatchTimeInterval)*time.Second)) + zipkinhttp.BatchSize(BatchSize), + zipkinhttp.MaxBacklog(MaxBackLog), + zipkinhttp.BatchInterval(time.Duration(BatchTimeInterval)*time.Second), + ) + defer reporter.Close() + + endpoint, err := zipkin.NewEndpoint("Trivial", "127.0.0.1:0") if err != nil { - log.Fatalf("Error initializing zipkin http collector: %v\n", err) + log.Fatalf("Error: %v\n", err) } - defer collector.Close() - - tracer, err := zipkin.NewTracer( - zipkin.NewRecorder(collector, false, "127.0.0.1:0", "Trivial")) + nativeTracer, err := zipkin.NewTracer(reporter, zipkin.WithLocalEndpoint(endpoint)) if err != nil { log.Fatalf("Error: %v\n", err) } + tracer := zipkinot.Wrap(nativeTracer) + log.Printf("Writing %d spans to zipkin server at %s\n", SpanCount, hostname) for i := 0; i < SpanCount; i++ { parent := tracer.StartSpan("Parent") - parent.LogEvent(fmt.Sprintf("Trace%d", i)) + parent.LogFields(otlog.Message(fmt.Sprintf("Trace%d", i))) parent.Finish() } log.Println("Done. 
Flushing remaining spans...") diff --git a/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go b/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go index b26e3d73fa3fd..9bf1f3261d9f6 100644 --- a/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go +++ b/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go @@ -24,6 +24,7 @@ Otherwise, the input file will be interpreted as json, and the output will be en package main import ( + "context" "encoding/json" "errors" "flag" @@ -32,7 +33,7 @@ import ( "log" "github.com/apache/thrift/lib/go/thrift" - "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore" + "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore" ) var ( @@ -100,20 +101,20 @@ func jsonToZipkinThrift(jsonRaw []byte) ([]byte, error) { zspans = append(zspans, spans...) buf := thrift.NewTMemoryBuffer() - transport := thrift.NewTBinaryProtocolTransport(buf) + transport := thrift.NewTBinaryProtocolConf(buf, nil) - if err = transport.WriteListBegin(thrift.STRUCT, len(spans)); err != nil { + if err = transport.WriteListBegin(context.Background(), thrift.STRUCT, len(spans)); err != nil { return nil, fmt.Errorf("error in beginning thrift write: %v", err) } for _, span := range zspans { - err = span.Write(transport) + err = span.Write(context.Background(), transport) if err != nil { return nil, fmt.Errorf("error converting zipkin struct to thrift: %v", err) } } - if err = transport.WriteListEnd(); err != nil { + if err = transport.WriteListEnd(context.Background()); err != nil { return nil, fmt.Errorf("error finishing thrift write: %v", err) } @@ -127,8 +128,8 @@ func thriftToJSONSpans(thriftData []byte) ([]byte, error) { return nil, err } - transport := thrift.NewTBinaryProtocolTransport(buffer) - _, size, err := transport.ReadListBegin() + transport := thrift.NewTBinaryProtocolConf(buffer, nil) + _, size, err := transport.ReadListBegin(context.Background()) if err != nil { err = 
fmt.Errorf("error in ReadListBegin: %v", err) return nil, err @@ -137,14 +138,14 @@ func thriftToJSONSpans(thriftData []byte) ([]byte, error) { var spans []*zipkincore.Span for i := 0; i < size; i++ { zs := &zipkincore.Span{} - if err = zs.Read(transport); err != nil { + if err = zs.Read(context.Background(), transport); err != nil { err = fmt.Errorf("Error reading into zipkin struct: %v", err) return nil, err } spans = append(spans, zs) } - err = transport.ReadListEnd() + err = transport.ReadListEnd(context.Background()) if err != nil { err = fmt.Errorf("error ending thrift read: %v", err) return nil, err diff --git a/plugins/inputs/zipkin/codec/codec.go b/plugins/inputs/zipkin/codec/codec.go index 167b8ec24f1a3..2754e13d969e7 100644 --- a/plugins/inputs/zipkin/codec/codec.go +++ b/plugins/inputs/zipkin/codec/codec.go @@ -3,8 +3,8 @@ package codec import ( "time" + "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore" "github.com/influxdata/telegraf/plugins/inputs/zipkin/trace" - "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore" ) //now is a mockable time for now diff --git a/plugins/inputs/zipkin/codec/jsonV1/jsonV1.go b/plugins/inputs/zipkin/codec/jsonV1/jsonV1.go index 1803486742301..4c054126fa95e 100644 --- a/plugins/inputs/zipkin/codec/jsonV1/jsonV1.go +++ b/plugins/inputs/zipkin/codec/jsonV1/jsonV1.go @@ -7,7 +7,7 @@ import ( "time" "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec" - "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore" + "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore" ) // JSON decodes spans from bodies `POST`ed to the spans endpoint diff --git a/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/GoUnusedProtection__.go b/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/GoUnusedProtection__.go new file mode 100644 index 0000000000000..be7b2034832d4 --- /dev/null +++ 
b/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/GoUnusedProtection__.go @@ -0,0 +1,5 @@ +// Code generated by Thrift Compiler (0.14.2). DO NOT EDIT. + +package zipkincore + +var GoUnusedProtection__ int diff --git a/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore-consts.go b/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore-consts.go new file mode 100644 index 0000000000000..7c5b5825acaa6 --- /dev/null +++ b/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore-consts.go @@ -0,0 +1,47 @@ +// Code generated by Thrift Compiler (0.14.2). DO NOT EDIT. + +package zipkincore + +import ( + "bytes" + "context" + "fmt" + "github.com/apache/thrift/lib/go/thrift" + "time" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal + +const CLIENT_SEND = "cs" +const CLIENT_RECV = "cr" +const SERVER_SEND = "ss" +const SERVER_RECV = "sr" +const MESSAGE_SEND = "ms" +const MESSAGE_RECV = "mr" +const WIRE_SEND = "ws" +const WIRE_RECV = "wr" +const CLIENT_SEND_FRAGMENT = "csf" +const CLIENT_RECV_FRAGMENT = "crf" +const SERVER_SEND_FRAGMENT = "ssf" +const SERVER_RECV_FRAGMENT = "srf" +const HTTP_HOST = "http.host" +const HTTP_METHOD = "http.method" +const HTTP_PATH = "http.path" +const HTTP_ROUTE = "http.route" +const HTTP_URL = "http.url" +const HTTP_STATUS_CODE = "http.status_code" +const HTTP_REQUEST_SIZE = "http.request.size" +const HTTP_RESPONSE_SIZE = "http.response.size" +const LOCAL_COMPONENT = "lc" +const ERROR = "error" +const CLIENT_ADDR = "ca" +const SERVER_ADDR = "sa" +const MESSAGE_ADDR = "ma" + +func init() { +} diff --git a/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore.go b/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore.go new file mode 100644 index 0000000000000..258fd4d1a0afc --- /dev/null +++ b/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore.go 
@@ -0,0 +1,1556 @@ +// Code generated by Thrift Compiler (0.14.2). DO NOT EDIT. + +package zipkincore + +import ( + "bytes" + "context" + "database/sql/driver" + "errors" + "fmt" + "github.com/apache/thrift/lib/go/thrift" + "time" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal + +//A subset of thrift base types, except BYTES. +type AnnotationType int64 + +const ( + AnnotationType_BOOL AnnotationType = 0 + AnnotationType_BYTES AnnotationType = 1 + AnnotationType_I16 AnnotationType = 2 + AnnotationType_I32 AnnotationType = 3 + AnnotationType_I64 AnnotationType = 4 + AnnotationType_DOUBLE AnnotationType = 5 + AnnotationType_STRING AnnotationType = 6 +) + +func (p AnnotationType) String() string { + switch p { + case AnnotationType_BOOL: + return "BOOL" + case AnnotationType_BYTES: + return "BYTES" + case AnnotationType_I16: + return "I16" + case AnnotationType_I32: + return "I32" + case AnnotationType_I64: + return "I64" + case AnnotationType_DOUBLE: + return "DOUBLE" + case AnnotationType_STRING: + return "STRING" + } + return "" +} + +func AnnotationTypeFromString(s string) (AnnotationType, error) { + switch s { + case "BOOL": + return AnnotationType_BOOL, nil + case "BYTES": + return AnnotationType_BYTES, nil + case "I16": + return AnnotationType_I16, nil + case "I32": + return AnnotationType_I32, nil + case "I64": + return AnnotationType_I64, nil + case "DOUBLE": + return AnnotationType_DOUBLE, nil + case "STRING": + return AnnotationType_STRING, nil + } + return AnnotationType(0), fmt.Errorf("not a valid AnnotationType string") +} + +func AnnotationTypePtr(v AnnotationType) *AnnotationType { return &v } + +func (p AnnotationType) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *AnnotationType) UnmarshalText(text []byte) error { + q, err := AnnotationTypeFromString(string(text)) + if err != nil { 
+ return err + } + *p = q + return nil +} + +func (p *AnnotationType) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = AnnotationType(v) + return nil +} + +func (p *AnnotationType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +// Indicates the network context of a service recording an annotation with two +// exceptions. +// +// When a BinaryAnnotation, and key is CLIENT_ADDR or SERVER_ADDR, +// the endpoint indicates the source or destination of an RPC. This exception +// allows zipkin to display network context of uninstrumented services, or +// clients such as web browsers. +// +// Attributes: +// - Ipv4: IPv4 host address packed into 4 bytes. +// +// Ex for the ip 1.2.3.4, it would be (1 << 24) | (2 << 16) | (3 << 8) | 4 +// - Port: IPv4 port or 0, if unknown. +// +// Note: this is to be treated as an unsigned integer, so watch for negatives. +// - ServiceName: Classifier of a source or destination in lowercase, such as "zipkin-web". +// +// This is the primary parameter for trace lookup, so should be intuitive as +// possible, for example, matching names in service discovery. +// +// Conventionally, when the service name isn't known, service_name = "unknown". +// However, it is also permissible to set service_name = "" (empty string). +// The difference in the latter usage is that the span will not be queryable +// by service name unless more information is added to the span with non-empty +// service name, e.g. an additional annotation from the server. +// +// Particularly clients may not have a reliable service name at ingest. One +// approach is to set service_name to "" at ingest, and later assign a +// better label based on binary annotations, such as user agent. +// - Ipv6: IPv6 host address packed into 16 bytes. 
Ex Inet6Address.getBytes() +type Endpoint struct { + Ipv4 int32 `thrift:"ipv4,1" db:"ipv4" json:"ipv4"` + Port int16 `thrift:"port,2" db:"port" json:"port"` + ServiceName string `thrift:"service_name,3" db:"service_name" json:"service_name"` + Ipv6 []byte `thrift:"ipv6,4" db:"ipv6" json:"ipv6,omitempty"` +} + +func NewEndpoint() *Endpoint { + return &Endpoint{} +} + +func (p *Endpoint) GetIpv4() int32 { + return p.Ipv4 +} + +func (p *Endpoint) GetPort() int16 { + return p.Port +} + +func (p *Endpoint) GetServiceName() string { + return p.ServiceName +} + +var Endpoint_Ipv6_DEFAULT []byte + +func (p *Endpoint) GetIpv6() []byte { + return p.Ipv6 +} +func (p *Endpoint) IsSetIpv6() bool { + return p.Ipv6 != nil +} + +func (p *Endpoint) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I16 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := 
iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *Endpoint) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Ipv4 = v + } + return nil +} + +func (p *Endpoint) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI16(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Port = v + } + return nil +} + +func (p *Endpoint) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.ServiceName = v + } + return nil +} + +func (p *Endpoint) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.Ipv6 = v + } + return nil +} + +func (p *Endpoint) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Endpoint"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + if err := p.writeField4(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct 
stop error: ", err) + } + return nil +} + +func (p *Endpoint) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "ipv4", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ipv4: ", p), err) + } + if err := oprot.WriteI32(ctx, int32(p.Ipv4)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ipv4 (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ipv4: ", p), err) + } + return err +} + +func (p *Endpoint) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "port", thrift.I16, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:port: ", p), err) + } + if err := oprot.WriteI16(ctx, int16(p.Port)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.port (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:port: ", p), err) + } + return err +} + +func (p *Endpoint) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "service_name", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:service_name: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.service_name (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:service_name: ", p), err) + } + return err +} + +func (p *Endpoint) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetIpv6() { + if err := oprot.WriteFieldBegin(ctx, "ipv6", thrift.STRING, 4); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 4:ipv6: ", p), err) + } + if err := oprot.WriteBinary(ctx, p.Ipv6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ipv6 (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:ipv6: ", p), err) + } + } + return err +} + +func (p *Endpoint) Equals(other *Endpoint) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Ipv4 != other.Ipv4 { + return false + } + if p.Port != other.Port { + return false + } + if p.ServiceName != other.ServiceName { + return false + } + if bytes.Compare(p.Ipv6, other.Ipv6) != 0 { + return false + } + return true +} + +func (p *Endpoint) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Endpoint(%+v)", *p) +} + +// Associates an event that explains latency with a timestamp. +// +// Unlike log statements, annotations are often codes: for example "sr". +// +// Attributes: +// - Timestamp: Microseconds from epoch. +// +// This value should use the most precise value possible. For example, +// gettimeofday or multiplying currentTimeMillis by 1000. +// - Value: Usually a short tag indicating an event, like "sr" or "finagle.retry". +// - Host: The host that recorded the value, primarily for query by service name. 
+type Annotation struct { + Timestamp int64 `thrift:"timestamp,1" db:"timestamp" json:"timestamp"` + Value string `thrift:"value,2" db:"value" json:"value"` + Host *Endpoint `thrift:"host,3" db:"host" json:"host,omitempty"` +} + +func NewAnnotation() *Annotation { + return &Annotation{} +} + +func (p *Annotation) GetTimestamp() int64 { + return p.Timestamp +} + +func (p *Annotation) GetValue() string { + return p.Value +} + +var Annotation_Host_DEFAULT *Endpoint + +func (p *Annotation) GetHost() *Endpoint { + if !p.IsSetHost() { + return Annotation_Host_DEFAULT + } + return p.Host +} +func (p *Annotation) IsSetHost() bool { + return p.Host != nil +} + +func (p *Annotation) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), 
err) + } + return nil +} + +func (p *Annotation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Timestamp = v + } + return nil +} + +func (p *Annotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Value = v + } + return nil +} + +func (p *Annotation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + p.Host = &Endpoint{} + if err := p.Host.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err) + } + return nil +} + +func (p *Annotation) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Annotation"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Annotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) + } + return err +} + +func (p *Annotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.Value)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) + } + return err +} + +func (p *Annotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetHost() { + if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:host: ", p), err) + } + if err := p.Host.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:host: ", p), err) + } + } + return err +} + +func (p *Annotation) Equals(other *Annotation) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Timestamp != other.Timestamp { + return false + } + if p.Value != other.Value { + return false + } + if !p.Host.Equals(other.Host) { + return false + } + return true +} + +func (p *Annotation) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Annotation(%+v)", *p) +} + +// Binary annotations are tags applied to a Span to give it context. For +// example, a binary annotation of HTTP_PATH ("http.path") could the path +// to a resource in a RPC call. 
+// +// Binary annotations of type STRING are always queryable, though more a +// historical implementation detail than a structural concern. +// +// Binary annotations can repeat, and vary on the host. Similar to Annotation, +// the host indicates who logged the event. This allows you to tell the +// difference between the client and server side of the same key. For example, +// the key "http.path" might be different on the client and server side due to +// rewriting, like "/api/v1/myresource" vs "/myresource. Via the host field, +// you can see the different points of view, which often help in debugging. +// +// Attributes: +// - Key: Name used to lookup spans, such as "http.path" or "finagle.version". +// - Value: Serialized thrift bytes, in TBinaryProtocol format. +// +// For legacy reasons, byte order is big-endian. See THRIFT-3217. +// - AnnotationType: The thrift type of value, most often STRING. +// +// annotation_type shouldn't vary for the same key. +// - Host: The host that recorded value, allowing query by service name or address. +// +// There are two exceptions: when key is "ca" or "sa", this is the source or +// destination of an RPC. This exception allows zipkin to display network +// context of uninstrumented services, such as browsers or databases. 
+type BinaryAnnotation struct { + Key string `thrift:"key,1" db:"key" json:"key"` + Value []byte `thrift:"value,2" db:"value" json:"value"` + AnnotationType AnnotationType `thrift:"annotation_type,3" db:"annotation_type" json:"annotation_type"` + Host *Endpoint `thrift:"host,4" db:"host" json:"host,omitempty"` +} + +func NewBinaryAnnotation() *BinaryAnnotation { + return &BinaryAnnotation{} +} + +func (p *BinaryAnnotation) GetKey() string { + return p.Key +} + +func (p *BinaryAnnotation) GetValue() []byte { + return p.Value +} + +func (p *BinaryAnnotation) GetAnnotationType() AnnotationType { + return p.AnnotationType +} + +var BinaryAnnotation_Host_DEFAULT *Endpoint + +func (p *BinaryAnnotation) GetHost() *Endpoint { + if !p.IsSetHost() { + return BinaryAnnotation_Host_DEFAULT + } + return p.Host +} +func (p *BinaryAnnotation) IsSetHost() bool { + return p.Host != nil +} + +func (p *BinaryAnnotation) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if 
err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BinaryAnnotation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Key = v + } + return nil +} + +func (p *BinaryAnnotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Value = v + } + return nil +} + +func (p *BinaryAnnotation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + temp := AnnotationType(v) + p.AnnotationType = temp + } + return nil +} + +func (p *BinaryAnnotation) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + p.Host = &Endpoint{} + if err := p.Host.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err) + } + return nil +} + +func (p *BinaryAnnotation) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "BinaryAnnotation"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + if err 
:= p.writeField4(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BinaryAnnotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.Key)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) + } + return err +} + +func (p *BinaryAnnotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) + } + if err := oprot.WriteBinary(ctx, p.Value); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) + } + return err +} + +func (p *BinaryAnnotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "annotation_type", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:annotation_type: ", p), err) + } + if err := oprot.WriteI32(ctx, int32(p.AnnotationType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.annotation_type (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + 
return thrift.PrependError(fmt.Sprintf("%T write field end error 3:annotation_type: ", p), err) + } + return err +} + +func (p *BinaryAnnotation) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetHost() { + if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:host: ", p), err) + } + if err := p.Host.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:host: ", p), err) + } + } + return err +} + +func (p *BinaryAnnotation) Equals(other *BinaryAnnotation) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Key != other.Key { + return false + } + if bytes.Compare(p.Value, other.Value) != 0 { + return false + } + if p.AnnotationType != other.AnnotationType { + return false + } + if !p.Host.Equals(other.Host) { + return false + } + return true +} + +func (p *BinaryAnnotation) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BinaryAnnotation(%+v)", *p) +} + +// A trace is a series of spans (often RPC calls) which form a latency tree. +// +// Spans are usually created by instrumentation in RPC clients or servers, but +// can also represent in-process activity. Annotations in spans are similar to +// log statements, and are sometimes created directly by application developers +// to indicate events of interest, such as a cache miss. +// +// The root span is where parent_id = Nil; it usually has the longest duration +// in the trace. +// +// Span identifiers are packed into i64s, but should be treated opaquely. +// String encoding is fixed-width lower-hex, to avoid signed interpretation. +// +// Attributes: +// - TraceID: Unique 8-byte identifier for a trace, set on all spans within it. 
+// - Name: Span name in lowercase, rpc method for example. Conventionally, when the +// span name isn't known, name = "unknown". +// - ID: Unique 8-byte identifier of this span within a trace. A span is uniquely +// identified in storage by (trace_id, id). +// - ParentID: The parent's Span.id; absent if this the root span in a trace. +// - Annotations: Associates events that explain latency with a timestamp. Unlike log +// statements, annotations are often codes: for example SERVER_RECV("sr"). +// Annotations are sorted ascending by timestamp. +// - BinaryAnnotations: Tags a span with context, usually to support query or aggregation. For +// example, a binary annotation key could be "http.path". +// - Debug: True is a request to store this span even if it overrides sampling policy. +// - Timestamp: Epoch microseconds of the start of this span, absent if this an incomplete +// span. +// +// This value should be set directly by instrumentation, using the most +// precise value possible. For example, gettimeofday or syncing nanoTime +// against a tick of currentTimeMillis. +// +// For compatibility with instrumentation that precede this field, collectors +// or span stores can derive this via Annotation.timestamp. +// For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp. +// +// Timestamp is nullable for input only. Spans without a timestamp cannot be +// presented in a timeline: Span stores should not output spans missing a +// timestamp. +// +// There are two known edge-cases where this could be absent: both cases +// exist when a collector receives a span in parts and a binary annotation +// precedes a timestamp. This is possible when.. +// - The span is in-flight (ex not yet received a timestamp) +// - The span's start event was lost +// - Duration: Measurement in microseconds of the critical path, if known. Durations of +// less than one microsecond must be rounded up to 1 microsecond. 
+// +// This value should be set directly, as opposed to implicitly via annotation +// timestamps. Doing so encourages precision decoupled from problems of +// clocks, such as skew or NTP updates causing time to move backwards. +// +// For compatibility with instrumentation that precede this field, collectors +// or span stores can derive this by subtracting Annotation.timestamp. +// For example, SERVER_SEND.timestamp - SERVER_RECV.timestamp. +// +// If this field is persisted as unset, zipkin will continue to work, except +// duration query support will be implementation-specific. Similarly, setting +// this field non-atomically is implementation-specific. +// +// This field is i64 vs i32 to support spans longer than 35 minutes. +// - TraceIDHigh: Optional unique 8-byte additional identifier for a trace. If non zero, this +// means the trace uses 128 bit traceIds instead of 64 bit. +type Span struct { + TraceID int64 `thrift:"trace_id,1" db:"trace_id" json:"trace_id"` + // unused field # 2 + Name string `thrift:"name,3" db:"name" json:"name"` + ID int64 `thrift:"id,4" db:"id" json:"id"` + ParentID *int64 `thrift:"parent_id,5" db:"parent_id" json:"parent_id,omitempty"` + Annotations []*Annotation `thrift:"annotations,6" db:"annotations" json:"annotations"` + // unused field # 7 + BinaryAnnotations []*BinaryAnnotation `thrift:"binary_annotations,8" db:"binary_annotations" json:"binary_annotations"` + Debug bool `thrift:"debug,9" db:"debug" json:"debug"` + Timestamp *int64 `thrift:"timestamp,10" db:"timestamp" json:"timestamp,omitempty"` + Duration *int64 `thrift:"duration,11" db:"duration" json:"duration,omitempty"` + TraceIDHigh *int64 `thrift:"trace_id_high,12" db:"trace_id_high" json:"trace_id_high,omitempty"` +} + +func NewSpan() *Span { + return &Span{} +} + +func (p *Span) GetTraceID() int64 { + return p.TraceID +} + +func (p *Span) GetName() string { + return p.Name +} + +func (p *Span) GetID() int64 { + return p.ID +} + +var Span_ParentID_DEFAULT int64 + 
+func (p *Span) GetParentID() int64 { + if !p.IsSetParentID() { + return Span_ParentID_DEFAULT + } + return *p.ParentID +} + +func (p *Span) GetAnnotations() []*Annotation { + return p.Annotations +} + +func (p *Span) GetBinaryAnnotations() []*BinaryAnnotation { + return p.BinaryAnnotations +} + +var Span_Debug_DEFAULT bool = false + +func (p *Span) GetDebug() bool { + return p.Debug +} + +var Span_Timestamp_DEFAULT int64 + +func (p *Span) GetTimestamp() int64 { + if !p.IsSetTimestamp() { + return Span_Timestamp_DEFAULT + } + return *p.Timestamp +} + +var Span_Duration_DEFAULT int64 + +func (p *Span) GetDuration() int64 { + if !p.IsSetDuration() { + return Span_Duration_DEFAULT + } + return *p.Duration +} + +var Span_TraceIDHigh_DEFAULT int64 + +func (p *Span) GetTraceIDHigh() int64 { + if !p.IsSetTraceIDHigh() { + return Span_TraceIDHigh_DEFAULT + } + return *p.TraceIDHigh +} +func (p *Span) IsSetParentID() bool { + return p.ParentID != nil +} + +func (p *Span) IsSetDebug() bool { + return p.Debug != Span_Debug_DEFAULT +} + +func (p *Span) IsSetTimestamp() bool { + return p.Timestamp != nil +} + +func (p *Span) IsSetDuration() bool { + return p.Duration != nil +} + +func (p *Span) IsSetTraceIDHigh() bool { + return p.TraceIDHigh != nil +} + +func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(ctx, iprot); err != 
nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I64 { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.LIST { + if err := p.ReadField6(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.LIST { + if err := p.ReadField8(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField9(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 10: + if fieldTypeId == thrift.I64 { + if err := p.ReadField10(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 11: + if fieldTypeId == thrift.I64 { + if err := p.ReadField11(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 12: + if fieldTypeId == thrift.I64 { + if err := p.ReadField12(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + 
return nil +} + +func (p *Span) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.TraceID = v + } + return nil +} + +func (p *Span) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.Name = v + } + return nil +} + +func (p *Span) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.ID = v + } + return nil +} + +func (p *Span) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.ParentID = &v + } + return nil +} + +func (p *Span) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Annotation, 0, size) + p.Annotations = tSlice + for i := 0; i < size; i++ { + _elem0 := &Annotation{} + if err := _elem0.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) + } + p.Annotations = append(p.Annotations, _elem0) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Span) ReadField8(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*BinaryAnnotation, 0, size) + p.BinaryAnnotations = tSlice + for i := 0; i < size; i++ { + _elem1 := &BinaryAnnotation{} + if err := _elem1.Read(ctx, 
iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err) + } + p.BinaryAnnotations = append(p.BinaryAnnotations, _elem1) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Span) ReadField9(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 9: ", err) + } else { + p.Debug = v + } + return nil +} + +func (p *Span) ReadField10(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 10: ", err) + } else { + p.Timestamp = &v + } + return nil +} + +func (p *Span) ReadField11(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 11: ", err) + } else { + p.Duration = &v + } + return nil +} + +func (p *Span) ReadField12(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 12: ", err) + } else { + p.TraceIDHigh = &v + } + return nil +} + +func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Span"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + if err := p.writeField4(ctx, oprot); err != nil { + return err + } + if err := p.writeField5(ctx, oprot); err != nil { + return err + } + if err := p.writeField6(ctx, oprot); err != nil { + return err + } + if err := p.writeField8(ctx, oprot); err != nil { + return err + } + if err := p.writeField9(ctx, oprot); err != nil { + return err + } + 
if err := p.writeField10(ctx, oprot); err != nil { + return err + } + if err := p.writeField11(ctx, oprot); err != nil { + return err + } + if err := p.writeField12(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "trace_id", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:trace_id: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.TraceID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.trace_id (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:trace_id: ", p), err) + } + return err +} + +func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "name", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:name: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.Name)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.name (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:name: ", p), err) + } + return err +} + +func (p *Span) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "id", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:id: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.ID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.id (4) field write 
error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:id: ", p), err) + } + return err +} + +func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetParentID() { + if err := oprot.WriteFieldBegin(ctx, "parent_id", thrift.I64, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:parent_id: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(*p.ParentID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.parent_id (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:parent_id: ", p), err) + } + } + return err +} + +func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "annotations", thrift.LIST, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:annotations: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Annotations)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Annotations { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:annotations: ", p), err) + } + return err +} + +func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "binary_annotations", thrift.LIST, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:binary_annotations: ", p), err) + } + if err := 
oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.BinaryAnnotations)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.BinaryAnnotations { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:binary_annotations: ", p), err) + } + return err +} + +func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetDebug() { + if err := oprot.WriteFieldBegin(ctx, "debug", thrift.BOOL, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:debug: ", p), err) + } + if err := oprot.WriteBool(ctx, bool(p.Debug)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.debug (9) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:debug: ", p), err) + } + } + return err +} + +func (p *Span) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetTimestamp() { + if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 10); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:timestamp: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(*p.Timestamp)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.timestamp (10) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 10:timestamp: ", p), err) + } + } + return err +} + +func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetDuration() { + if err := oprot.WriteFieldBegin(ctx, 
"duration", thrift.I64, 11); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:duration: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(*p.Duration)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.duration (11) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 11:duration: ", p), err) + } + } + return err +} + +func (p *Span) writeField12(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetTraceIDHigh() { + if err := oprot.WriteFieldBegin(ctx, "trace_id_high", thrift.I64, 12); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:trace_id_high: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(*p.TraceIDHigh)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.trace_id_high (12) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 12:trace_id_high: ", p), err) + } + } + return err +} + +func (p *Span) Equals(other *Span) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.TraceID != other.TraceID { + return false + } + if p.Name != other.Name { + return false + } + if p.ID != other.ID { + return false + } + if p.ParentID != other.ParentID { + if p.ParentID == nil || other.ParentID == nil { + return false + } + if (*p.ParentID) != (*other.ParentID) { + return false + } + } + if len(p.Annotations) != len(other.Annotations) { + return false + } + for i, _tgt := range p.Annotations { + _src2 := other.Annotations[i] + if !_tgt.Equals(_src2) { + return false + } + } + if len(p.BinaryAnnotations) != len(other.BinaryAnnotations) { + return false + } + for i, _tgt := range p.BinaryAnnotations { + _src3 := other.BinaryAnnotations[i] + if !_tgt.Equals(_src3) { + return false + } + } + if p.Debug != other.Debug { 
+ return false + } + if p.Timestamp != other.Timestamp { + if p.Timestamp == nil || other.Timestamp == nil { + return false + } + if (*p.Timestamp) != (*other.Timestamp) { + return false + } + } + if p.Duration != other.Duration { + if p.Duration == nil || other.Duration == nil { + return false + } + if (*p.Duration) != (*other.Duration) { + return false + } + } + if p.TraceIDHigh != other.TraceIDHigh { + if p.TraceIDHigh == nil || other.TraceIDHigh == nil { + return false + } + if (*p.TraceIDHigh) != (*other.TraceIDHigh) { + return false + } + } + return true +} + +func (p *Span) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Span(%+v)", *p) +} diff --git a/plugins/inputs/zipkin/codec/thrift/thrift.go b/plugins/inputs/zipkin/codec/thrift/thrift.go index 65a9e1488c2c4..c2c60a3395d2d 100644 --- a/plugins/inputs/zipkin/codec/thrift/thrift.go +++ b/plugins/inputs/zipkin/codec/thrift/thrift.go @@ -1,16 +1,16 @@ package thrift import ( + "context" "encoding/binary" "fmt" "net" "strconv" "time" - "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec" - "github.com/apache/thrift/lib/go/thrift" - "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore" + "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec" + "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore" ) // UnmarshalThrift converts raw bytes in thrift format to a slice of spans @@ -20,8 +20,8 @@ func UnmarshalThrift(body []byte) ([]*zipkincore.Span, error) { return nil, err } - transport := thrift.NewTBinaryProtocolTransport(buffer) - _, size, err := transport.ReadListBegin() + transport := thrift.NewTBinaryProtocolConf(buffer, nil) + _, size, err := transport.ReadListBegin(context.Background()) if err != nil { return nil, err } @@ -29,13 +29,13 @@ func UnmarshalThrift(body []byte) ([]*zipkincore.Span, error) { spans := make([]*zipkincore.Span, size) for i := 0; i < size; i++ { zs := &zipkincore.Span{} - if err = 
zs.Read(transport); err != nil { + if err = zs.Read(context.Background(), transport); err != nil { return nil, err } spans[i] = zs } - if err = transport.ReadListEnd(); err != nil { + if err = transport.ReadListEnd(context.Background()); err != nil { return nil, err } return spans, nil diff --git a/plugins/inputs/zipkin/codec/thrift/thrift_test.go b/plugins/inputs/zipkin/codec/thrift/thrift_test.go index 798fc269edf86..d4bbc1d54df20 100644 --- a/plugins/inputs/zipkin/codec/thrift/thrift_test.go +++ b/plugins/inputs/zipkin/codec/thrift/thrift_test.go @@ -6,7 +6,7 @@ import ( "github.com/google/go-cmp/cmp" - "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore" + "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore" ) func Test_endpointHost(t *testing.T) { From f988d77049d1d08821db8b613e889866d90be148 Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Thu, 2 Sep 2021 14:17:05 -0600 Subject: [PATCH 036/176] Update changelog (cherry picked from commit e9872741674c054b58a4236d3c3ea98f18515c65) --- CHANGELOG.md | 53 +++++ etc/telegraf.conf | 569 +++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 561 insertions(+), 61 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 053e9ee59bbf7..42ca26772a37b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,56 @@ +## v1.20.0-rc0 [2021-09-02] + +#### Release Notes + + - [#9642](https://github.com/influxdata/telegraf/pull/9642) Build with Golang 1.17 + +#### Bugfixes + + - [#9700](https://github.com/influxdata/telegraf/pull/9700) Update thrift module to 0.14.2 and zipkin-go-opentracing 0.4.5 + - [#9587](https://github.com/influxdata/telegraf/pull/9587) `outputs.opentelemetry` Use headers config in grpc requests + - [#9713](https://github.com/influxdata/telegraf/pull/9713) Update runc module to v1.0.0-rc95 to address CVE-2021-30465 + - [#9699](https://github.com/influxdata/telegraf/pull/9699) Migrate dgrijalva/jwt-go to golang-jwt/jwt/v4 + - 
[#9139](https://github.com/influxdata/telegraf/pull/9139) `serializers.prometheus` Update timestamps and expiration time as new data arrives + - [#9625](https://github.com/influxdata/telegraf/pull/9625) `outputs.graylog` Output timestamp with fractional seconds + - [#9655](https://github.com/influxdata/telegraf/pull/9655) Update cloud.google.com/go/pubsub module from 1.2.0 to 1.15.0 + - [#9674](https://github.com/influxdata/telegraf/pull/9674) `inputs.mongodb` Change command based on server version + - [#9676](https://github.com/influxdata/telegraf/pull/9676) `outputs.dynatrace` Remove hardcoded int value + - [#9619](https://github.com/influxdata/telegraf/pull/9619) `outputs.influxdb_v2` Increase accepted retry-after header values. + - [#9652](https://github.com/influxdata/telegraf/pull/9652) Update github.com/tinylib/msgp module from 1.1.5 to 1.1.6 + - [#9471](https://github.com/influxdata/telegraf/pull/9471) `inputs.sql` Make timeout apply to single query + +#### Features + + - [#9665](https://github.com/influxdata/telegraf/pull/9665) `inputs.systemd_units` feat(plugins/inputs/systemd_units): add pattern support + - [#9598](https://github.com/influxdata/telegraf/pull/9598) `outputs.sql` Add bool datatype + - [#9386](https://github.com/influxdata/telegraf/pull/9386) `inputs.cloudwatch` Pull metrics from multiple AWS CloudWatch namespaces + - [#9411](https://github.com/influxdata/telegraf/pull/9411) `inputs.cloudwatch` Support AWS Web Identity Provider + - [#9570](https://github.com/influxdata/telegraf/pull/9570) `inputs.modbus` Add support for RTU over TCP + - [#9488](https://github.com/influxdata/telegraf/pull/9488) `inputs.procstat` Support cgroup globs and include systemd unit children + - [#9322](https://github.com/influxdata/telegraf/pull/9322) `inputs.suricata` Support alert event type + - [#5464](https://github.com/influxdata/telegraf/pull/5464) `inputs.prometheus` Add ability to query Consul Service catalog + - 
[#8641](https://github.com/influxdata/telegraf/pull/8641) `outputs.prometheus_client` Add Landing page + - [#9529](https://github.com/influxdata/telegraf/pull/9529) `inputs.http_listener_v2` Allows multiple paths and add path_tag + - [#9395](https://github.com/influxdata/telegraf/pull/9395) Add cookie authentication to HTTP input and output plugins + - [#8454](https://github.com/influxdata/telegraf/pull/8454) `inputs.syslog` Add RFC3164 support + - [#9351](https://github.com/influxdata/telegraf/pull/9351) `inputs.jenkins` Add option to include nodes by name + - [#9277](https://github.com/influxdata/telegraf/pull/9277) Add JSON, MessagePack, and Protocol-buffers format support to the XPath parser + - [#9343](https://github.com/influxdata/telegraf/pull/9343) `inputs.snmp_trap` Improve MIB lookup performance + - [#9342](https://github.com/influxdata/telegraf/pull/9342) `outputs.newrelic` Add option to override metric_url + - [#9306](https://github.com/influxdata/telegraf/pull/9306) `inputs.smart` Add power mode status + +#### New Input Plugins + + - [#9602](https://github.com/influxdata/telegraf/pull/9602) Add rocm_smi input to monitor AMD GPUs + - [#9101](https://github.com/influxdata/telegraf/pull/9101) Add mdstat input to gather from /proc/mdstat collection + - [#3536](https://github.com/influxdata/telegraf/pull/3536) Add Elasticsearch query input + +#### New Output Plugins + + - [#9228](https://github.com/influxdata/telegraf/pull/9228) Add OpenTelemetry output + - [#9426](https://github.com/influxdata/telegraf/pull/9426) Add Azure Data Explorer(ADX) output + + ## v1.19.3 [2021-08-18] #### Bugfixes diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 43b1f8f3ade45..fabd2616141fb 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -315,6 +315,30 @@ # # "ai.cloud.roleInstance" = "kubernetes_pod_name" +# # Sends metrics to Azure Data Explorer +# [[outputs.azure_data_explorer]] +# ## Azure Data Exlorer cluster endpoint +# ## ex: endpoint_url = 
"https://clustername.australiasoutheast.kusto.windows.net" +# endpoint_url = "" +# +# ## The Azure Data Explorer database that the metrics will be ingested into. +# ## The plugin will NOT generate this database automatically, it's expected that this database already exists before ingestion. +# ## ex: "exampledatabase" +# database = "" +# +# ## Timeout for Azure Data Explorer operations +# # timeout = "20s" +# +# ## Type of metrics grouping used when pushing to Azure Data Explorer. +# ## Default is "TablePerMetric" for one table per different metric. +# ## For more information, please check the plugin README. +# # metrics_grouping_type = "TablePerMetric" +# +# ## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable"). +# # table_name = "" +# + + # # Send aggregate metrics to Azure Monitor # [[outputs.azure_monitor]] # ## Timeout for HTTP writes. @@ -404,16 +428,19 @@ # # ## Amazon Credentials # ## Credentials are loaded in the following order -# ## 1) Assumed credentials via STS if role_arn is specified -# ## 2) explicit credentials from 'access_key' and 'secret_key' -# ## 3) shared profile from 'profile' -# ## 4) environment variables -# ## 5) shared credentials file -# ## 6) EC2 Instance Profile +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile # #access_key = "" # #secret_key = "" # #token = "" # #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" # #profile = "" # #shared_credential_file = "" # @@ -452,16 +479,19 @@ # # ## Amazon Credentials # ## Credentials are loaded in the following order -# ## 1) Assumed credentials via STS if role_arn is specified -# ## 2) explicit 
credentials from 'access_key' and 'secret_key' -# ## 3) shared profile from 'profile' -# ## 4) environment variables -# ## 5) shared credentials file -# ## 6) EC2 Instance Profile +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile # #access_key = "" # #secret_key = "" # #token = "" # #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" # #profile = "" # #shared_credential_file = "" # @@ -809,6 +839,15 @@ # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # +# ## Optional Cookie authentication +# # cookie_auth_url = "https://localhost/authMe" +# # cookie_auth_method = "POST" +# # cookie_auth_username = "username" +# # cookie_auth_password = "pa$$word" +# # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' +# ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie +# # cookie_auth_renewal = "5m" +# # ## Data format to output. 
# ## Each data format has it's own unique set of configuration options, read # ## more about them here: @@ -1049,16 +1088,19 @@ # # ## Amazon Credentials # ## Credentials are loaded in the following order -# ## 1) Assumed credentials via STS if role_arn is specified -# ## 2) explicit credentials from 'access_key' and 'secret_key' -# ## 3) shared profile from 'profile' -# ## 4) environment variables -# ## 5) shared credentials file -# ## 6) EC2 Instance Profile +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile # #access_key = "" # #secret_key = "" # #token = "" # #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" # #profile = "" # #shared_credential_file = "" # @@ -1268,6 +1310,10 @@ # ## HTTP Proxy override. If unset use values from the standard # ## proxy environment variables to determine proxy, if any. # # http_proxy = "http://corporate.proxy:3128" +# +# ## Metric URL override to enable geographic location endpoints. +# # If not set use values from the standard +# # metric_url = "https://metric-api.newrelic.com/metric/v1" # # Send telegraf measurements to NSQD @@ -1284,6 +1330,41 @@ # data_format = "influx" +# # Send OpenTelemetry metrics over gRPC +# [[outputs.opentelemetry]] +# ## Override the default (localhost:4317) OpenTelemetry gRPC service +# ## address:port +# # service_address = "localhost:4317" +# +# ## Override the default (5s) request timeout +# # timeout = "5s" +# +# ## Optional TLS Config. +# ## +# ## Root certificates for verifying server certificates encoded in PEM format. +# # tls_ca = "/etc/telegraf/ca.pem" +# ## The public and private keypairs for the client encoded in PEM format. 
+# ## May contain intermediate certificates. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS, but skip TLS chain and host verification. +# # insecure_skip_verify = false +# ## Send the specified TLS server name via SNI. +# # tls_server_name = "foo.example.com" +# +# ## Override the default (gzip) compression used to send data. +# ## Supports: "gzip", "none" +# # compression = "gzip" +# +# ## Additional OpenTelemetry resource attributes +# # [outputs.opentelemetry.attributes] +# # "service.name" = "demo" +# +# ## Additional gRPC request metadata +# # [outputs.opentelemetry.headers] +# # key1 = "value1" + + # # Configuration for OpenTSDB server to send metrics to # [[outputs.opentsdb]] # ## prefix for metrics keys @@ -1748,16 +1829,19 @@ # # ## Amazon Credentials # ## Credentials are loaded in the following order: -# ## 1) Assumed credentials via STS if role_arn is specified -# ## 2) Explicit credentials from 'access_key' and 'secret_key' -# ## 3) Shared profile from 'profile' -# ## 4) Environment variables -# ## 5) Shared credentials file -# ## 6) EC2 Instance Profile +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile # #access_key = "" # #secret_key = "" # #token = "" # #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" # #profile = "" # #shared_credential_file = "" # @@ -1870,7 +1954,7 @@ # ## Print Warp 10 error body # # print_error_body = false # -# ## Max string error size +# ## Max string error size # # max_string_error_size = 511 # # ## Optional TLS Config @@ -2896,6 +2980,15 @@ # # num_histogram_buckets = 100 # default: 10 +# # Query statistics from AMD Graphics cards using 
rocm-smi binary +# [[inputs.amd_rocm_smi]] +# ## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath +# # bin_path = "/opt/rocm/bin/rocm-smi" +# +# ## Optional: timeout for GPU polling +# # timeout = "5s" + + # # Read Apache status information (mod_status) # [[inputs.apache]] # ## An array of URLs to gather from, must be directed at the machine @@ -3163,16 +3256,19 @@ # # ## Amazon Credentials # ## Credentials are loaded in the following order -# ## 1) Assumed credentials via STS if role_arn is specified -# ## 2) explicit credentials from 'access_key' and 'secret_key' -# ## 3) shared profile from 'profile' -# ## 4) environment variables -# ## 5) shared credentials file -# ## 6) EC2 Instance Profile +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile # # access_key = "" # # secret_key = "" # # token = "" # # role_arn = "" +# # web_identity_token_file = "" +# # role_session_name = "" # # profile = "" # # shared_credential_file = "" # @@ -3212,8 +3308,10 @@ # ## Configure the TTL for the internal cache of metrics. # # cache_ttl = "1h" # -# ## Metric Statistic Namespace (required) -# namespace = "AWS/ELB" +# ## Metric Statistic Namespaces (required) +# namespaces = ["AWS/ELB"] +# # A single metric statistic namespace that will be appended to namespaces on startup +# # namespace = "AWS/ELB" # # ## Maximum requests per second. 
Note that the global default AWS rate limit is # ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a @@ -3632,6 +3730,72 @@ # # num_most_recent_indices = 0 +# # Derive metrics from aggregating Elasticsearch query results +# [[inputs.elasticsearch_query]] +# ## The full HTTP endpoint URL for your Elasticsearch instance +# ## Multiple urls can be specified as part of the same cluster, +# ## this means that only ONE of the urls will be written to each interval. +# urls = [ "http://node1.es.example.com:9200" ] # required. +# +# ## Elasticsearch client timeout, defaults to "5s". +# # timeout = "5s" +# +# ## Set to true to ask Elasticsearch a list of all cluster nodes, +# ## thus it is not necessary to list all nodes in the urls config option +# # enable_sniffer = false +# +# ## Set the interval to check if the Elasticsearch nodes are available +# ## This option is only used if enable_sniffer is also set (0s to disable it) +# # health_check_interval = "10s" +# +# ## HTTP basic authentication details (eg. when using x-pack) +# # username = "telegraf" +# # password = "mypassword" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# [[inputs.elasticsearch_query.aggregation]] +# ## measurement name for the results of the aggregation query +# measurement_name = "measurement" +# +# ## Elasticsearch indexes to query (accept wildcards). +# index = "index-*" +# +# ## The date/time field in the Elasticsearch index (mandatory). +# date_field = "@timestamp" +# +# ## Time window to query (eg. "1m" to query documents from last minute). 
+# ## Normally should be set to same as collection interval +# query_period = "1m" +# +# ## Lucene query to filter results +# # filter_query = "*" +# +# ## Fields to aggregate values (must be numeric fields) +# # metric_fields = ["metric"] +# +# ## Aggregation function to use on the metric fields +# ## Must be set if 'metric_fields' is set +# ## Valid values are: avg, sum, min, max, sum +# # metric_function = "avg" +# +# ## Fields to be used as tags +# ## Must be text, non-analyzed fields. Metric aggregations are performed per tag +# # tags = ["field.keyword", "field2.keyword"] +# +# ## Set to true to not ignore documents when the tag(s) above are missing +# # include_missing_tag = false +# +# ## String value of the tag when the tag does not exist +# ## Used when include_missing_tag is true +# # missing_tag_value = "null" + + # # Returns ethtool statistics for given interfaces # [[inputs.ethtool]] # ## List of interfaces to pull metrics for @@ -3944,6 +4108,15 @@ # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # +# ## Optional Cookie authentication +# # cookie_auth_url = "https://localhost/authMe" +# # cookie_auth_method = "POST" +# # cookie_auth_username = "username" +# # cookie_auth_password = "pa$$word" +# # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' +# ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie +# # cookie_auth_renewal = "5m" +# # ## Amount of time allowed to complete the HTTP request # # timeout = "5s" # @@ -4286,7 +4459,9 @@ # # job_include = [ "*" ] # # job_exclude = [ ] # -# ## Nodes to exclude from gathering +# ## Nodes to include or exclude from gathering +# ## When using both lists, node_exclude has priority. 
+# # node_include = [ "*" ] # # node_exclude = [ ] # # ## Worker pool for jenkins plugin only @@ -4343,19 +4518,19 @@ # ## List of metrics collected on above servers # ## Each metric consists in a name, a jmx path and either # ## a pass or drop slice attribute. -# ## This collect all heap memory usage metrics. +# ## This collect all heap memory usage metrics. # [[inputs.jolokia.metrics]] # name = "heap_memory_usage" # mbean = "java.lang:type=Memory" # attribute = "HeapMemoryUsage" # -# ## This collect thread counts metrics. +# ## This collect thread counts metrics. # [[inputs.jolokia.metrics]] # name = "thread_count" # mbean = "java.lang:type=Threading" # attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount" # -# ## This collect number of class loaded/unloaded counts metrics. +# ## This collect number of class loaded/unloaded counts metrics. # [[inputs.jolokia.metrics]] # name = "class_count" # mbean = "java.lang:type=ClassLoading" @@ -4637,6 +4812,13 @@ # # timeout = "5s" +# # Get md array statistics from /proc/mdstat +# [[inputs.mdstat]] +# ## Sets file path +# ## If not specified, then default is /proc/mdstat +# # file_name = "/proc/mdstat" + + # # Read metrics from one or many memcached servers # [[inputs.memcached]] # ## An array of address to gather stats about. 
Specify an ip on hostname @@ -4708,7 +4890,7 @@ # [[inputs.modbus]] # ## Connection Configuration # ## -# ## The plugin supports connections to PLCs via MODBUS/TCP or +# ## The plugin supports connections to PLCs via MODBUS/TCP, RTU over TCP, ASCII over TCP or # ## via serial line communication in binary (RTU) or readable (ASCII) encoding # ## # ## Device name @@ -4735,8 +4917,11 @@ # # data_bits = 8 # # parity = "N" # # stop_bits = 1 -# # transmission_mode = "RTU" # +# ## For Modbus over TCP you can choose between "TCP", "RTUoverTCP" and "ASCIIoverTCP" +# ## default behaviour is "TCP" if the controller is TCP +# ## For Serial you can choose between "RTU" and "ASCII" +# # transmission_mode = "RTU" # # ## Measurements # ## @@ -5543,9 +5728,10 @@ # # pattern = "nginx" # ## user as argument for pgrep (ie, pgrep -u ) # # user = "nginx" -# ## Systemd unit name +# ## Systemd unit name, supports globs when include_systemd_children is set to true # # systemd_unit = "nginx.service" -# ## CGroup name or path +# # include_systemd_children = false +# ## CGroup name or path, supports globs # # cgroup = "systemd/system.slice/nginx.service" # # ## Windows service name @@ -5785,13 +5971,6 @@ # # Specify a list of one or more riak http servers # servers = ["http://localhost:8098"] -# # Query statistics from AMD Graphics cards using rocm-smi binary -# [[inputs.amd_rocm_smi]] -# ## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath -# # bin_path = "/opt/rocm/bin/rocm-smi" -# -# ## Optional: timeout for GPU polling -# # timeout = "5s" # # Read API usage and limits for a Salesforce organisation # [[inputs.salesforce]] @@ -6186,6 +6365,13 @@ # ## values are "socket", "target", "device", "mount", "automount", "swap", # ## "timer", "path", "slice" and "scope ": # # unittype = "service" +# # +# ## Filter for a specific pattern, default is "" (i.e. all), other possible +# ## values are valid pattern for systemctl, e.g. 
"a*" for all units with +# ## names starting with "a" +# # pattern = "" +# ## pattern = "telegraf* influxdb*" +# ## pattern = "a*" # # Reads metrics from a Teamspeak 3 Server via ServerQuery @@ -6293,6 +6479,219 @@ # ## General connection timeout # # timeout = "5s" +# # Input plugin to collect Windows Event Log messages +# [[inputs.win_eventlog]] +# ## Telegraf should have Administrator permissions to subscribe for some Windows Events channels +# ## (System log, for example) +# +# ## LCID (Locale ID) for event rendering +# ## 1033 to force English language +# ## 0 to use default Windows locale +# # locale = 0 +# +# ## Name of eventlog, used only if xpath_query is empty +# ## Example: "Application" +# # eventlog_name = "" +# +# ## xpath_query can be in defined short form like "Event/System[EventID=999]" +# ## or you can form a XML Query. Refer to the Consuming Events article: +# ## https://docs.microsoft.com/en-us/windows/win32/wes/consuming-events +# ## XML query is the recommended form, because it is most flexible +# ## You can create or debug XML Query by creating Custom View in Windows Event Viewer +# ## and then copying resulting XML here +# xpath_query = ''' +# +# +# +# *[System[( (EventID >= 5152 and EventID <= 5158) or EventID=5379 or EventID=4672)]] +# +# +# +# +# +# +# +# +# +# +# +# +# +# +# ''' +# +# ## System field names: +# ## "Source", "EventID", "Version", "Level", "Task", "Opcode", "Keywords", "TimeCreated", +# ## "EventRecordID", "ActivityID", "RelatedActivityID", "ProcessID", "ThreadID", "ProcessName", +# ## "Channel", "Computer", "UserID", "UserName", "Message", "LevelText", "TaskText", "OpcodeText" +# +# ## In addition to System, Data fields can be unrolled from additional XML nodes in event. 
+# ## Human-readable representation of those nodes is formatted into event Message field, +# ## but XML is more machine-parsable +# +# # Process UserData XML to fields, if this node exists in Event XML +# process_userdata = true +# +# # Process EventData XML to fields, if this node exists in Event XML +# process_eventdata = true +# +# ## Separator character to use for unrolled XML Data field names +# separator = "_" +# +# ## Get only first line of Message field. For most events first line is usually more than enough +# only_first_line_of_message = true +# +# ## Parse timestamp from TimeCreated.SystemTime event field. +# ## Will default to current time of telegraf processing on parsing error or if set to false +# timestamp_from_event = true +# +# ## Fields to include as tags. Globbing supported ("Level*" for both "Level" and "LevelText") +# event_tags = ["Source", "EventID", "Level", "LevelText", "Task", "TaskText", "Opcode", "OpcodeText", "Keywords", "Channel", "Computer"] +# +# ## Default list of fields to send. All fields are sent by default. Globbing supported +# event_fields = ["*"] +# +# ## Fields to exclude. Also applied to data fields. Globbing supported +# exclude_fields = ["TimeCreated", "Binary", "Data_Address*"] +# +# ## Skip those tags or fields if their value is empty or equals to zero. Globbing supported +# exclude_empty = ["*ActivityID", "UserID"] + + +# # Input plugin to counterPath Performance Counters on Windows operating systems +# [[inputs.win_perf_counters]] +# ## By default this plugin returns basic CPU and Disk statistics. +# ## See the README file for more examples. +# ## Uncomment examples below or write your own as you see fit. If the system +# ## being polled for data does not have the Object at startup of the Telegraf +# ## agent, it will not be gathered. 
+# ## Settings: +# # PrintValid = false # Print All matching performance counters +# # Whether request a timestamp along with the PerfCounter data or just use current time +# # UsePerfCounterTime=true +# # If UseWildcardsExpansion params is set to true, wildcards (partial wildcards in instance names and wildcards in counters names) in configured counter paths will be expanded +# # and in case of localized Windows, counter paths will be also localized. It also returns instance indexes in instance names. +# # If false, wildcards (not partial) in instance names will still be expanded, but instance indexes will not be returned in instance names. +# #UseWildcardsExpansion = false +# # Period after which counters will be reread from configuration and wildcards in counter paths expanded +# CountersRefreshInterval="1m" +# +# [[inputs.win_perf_counters.object]] +# # Processor usage, alternative to native, reports on a per core. +# ObjectName = "Processor" +# Instances = ["*"] +# Counters = [ +# "% Idle Time", +# "% Interrupt Time", +# "% Privileged Time", +# "% User Time", +# "% Processor Time", +# "% DPC Time", +# ] +# Measurement = "win_cpu" +# # Set to true to include _Total instance when querying for all (*). +# # IncludeTotal=false +# # Print out when the performance counter is missing from object, counter or instance. 
+# # WarnOnMissing = false +# +# [[inputs.win_perf_counters.object]] +# # Disk times and queues +# ObjectName = "LogicalDisk" +# Instances = ["*"] +# Counters = [ +# "% Idle Time", +# "% Disk Time", +# "% Disk Read Time", +# "% Disk Write Time", +# "% User Time", +# "% Free Space", +# "Current Disk Queue Length", +# "Free Megabytes", +# ] +# Measurement = "win_disk" +# +# [[inputs.win_perf_counters.object]] +# ObjectName = "PhysicalDisk" +# Instances = ["*"] +# Counters = [ +# "Disk Read Bytes/sec", +# "Disk Write Bytes/sec", +# "Current Disk Queue Length", +# "Disk Reads/sec", +# "Disk Writes/sec", +# "% Disk Time", +# "% Disk Read Time", +# "% Disk Write Time", +# ] +# Measurement = "win_diskio" +# +# [[inputs.win_perf_counters.object]] +# ObjectName = "Network Interface" +# Instances = ["*"] +# Counters = [ +# "Bytes Received/sec", +# "Bytes Sent/sec", +# "Packets Received/sec", +# "Packets Sent/sec", +# "Packets Received Discarded", +# "Packets Outbound Discarded", +# "Packets Received Errors", +# "Packets Outbound Errors", +# ] +# Measurement = "win_net" +# +# +# [[inputs.win_perf_counters.object]] +# ObjectName = "System" +# Counters = [ +# "Context Switches/sec", +# "System Calls/sec", +# "Processor Queue Length", +# "System Up Time", +# ] +# Instances = ["------"] +# Measurement = "win_system" +# +# [[inputs.win_perf_counters.object]] +# # Example counterPath where the Instance portion must be removed to get data back, +# # such as from the Memory object. +# ObjectName = "Memory" +# Counters = [ +# "Available Bytes", +# "Cache Faults/sec", +# "Demand Zero Faults/sec", +# "Page Faults/sec", +# "Pages/sec", +# "Transition Faults/sec", +# "Pool Nonpaged Bytes", +# "Pool Paged Bytes", +# "Standby Cache Reserve Bytes", +# "Standby Cache Normal Priority Bytes", +# "Standby Cache Core Bytes", +# ] +# Instances = ["------"] # Use 6 x - to remove the Instance bit from the counterPath. 
+# Measurement = "win_mem" +# +# [[inputs.win_perf_counters.object]] +# # Example query where the Instance portion must be removed to get data back, +# # such as from the Paging File object. +# ObjectName = "Paging File" +# Counters = [ +# "% Usage", +# ] +# Instances = ["_Total"] +# Measurement = "win_swap" + + +# # Input plugin to report Windows services info. +# [[inputs.win_services]] +# ## Names of the services to monitor. Leave empty to monitor all the available services on the host. Globs accepted. +# service_names = [ +# "LanmanServer", +# "TermService", +# "Win*", +# ] + # # A plugin to collect stats from Varnish HTTP Cache # [[inputs.varnish]] @@ -7138,7 +7537,14 @@ # service_address = ":8080" # # ## Path to listen to. -# # path = "/telegraf" +# ## This option is deprecated and only available for backward-compatibility. Please use paths instead. +# # path = "" +# +# ## Paths to listen to. +# # paths = ["/telegraf"] +# +# ## Save path as http_listener_v2_path tag if set to true +# # path_tag = false # # ## HTTP methods to accept. # # methods = ["POST", "PUT"] @@ -7149,7 +7555,7 @@ # # write_timeout = "10s" # # ## Maximum allowed http request body size in bytes. -# ## 0 means to use the default of 524,288,00 bytes (500 mebibytes) +# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) # # max_body_size = "500MB" # # ## Part of the request to consume. 
Available options are "body" and @@ -7450,16 +7856,19 @@ # # ## Amazon Credentials # ## Credentials are loaded in the following order -# ## 1) Assumed credentials via STS if role_arn is specified -# ## 2) explicit credentials from 'access_key' and 'secret_key' -# ## 3) shared profile from 'profile' -# ## 4) environment variables -# ## 5) shared credentials file -# ## 6) EC2 Instance Profile +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile # # access_key = "" # # secret_key = "" # # token = "" # # role_arn = "" +# # web_identity_token_file = "" +# # role_session_name = "" # # profile = "" # # shared_credential_file = "" # @@ -7723,16 +8132,28 @@ # # Receive OpenTelemetry traces, metrics, and logs over gRPC # [[inputs.opentelemetry]] -# ## Override the OpenTelemetry gRPC service address:port +# ## Override the default (0.0.0.0:4317) destination OpenTelemetry gRPC service +# ## address:port # # service_address = "0.0.0.0:4317" # -# ## Override the default request timeout +# ## Override the default (5s) new connection timeout # # timeout = "5s" # -# ## Select a schema for metrics: prometheus-v1 or prometheus-v2 +# ## Override the default (prometheus-v1) metrics schema. +# ## Supports: "prometheus-v1", "prometheus-v2" # ## For more information about the alternatives, read the Prometheus input # ## plugin notes. # # metrics_schema = "prometheus-v1" +# +# ## Optional TLS Config. +# ## For advanced options: https://github.com/influxdata/telegraf/blob/v1.18.3/docs/TLS.md +# ## +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections. 
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# ## Add service certificate and key. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" # # Read metrics from one or many pgbouncer servers @@ -7909,6 +8330,19 @@ # # eg. To scrape pods on a specific node # # kubernetes_field_selector = "spec.nodeName=$HOSTNAME" # +# ## Scrape Services available in Consul Catalog +# # [inputs.prometheus.consul] +# # enabled = true +# # agent = "http://localhost:8500" +# # query_interval = "5m" +# +# # [[inputs.prometheus.consul.query]] +# # name = "a service name" +# # tag = "a service tag" +# # url = 'http://{{if ne .ServiceAddress ""}}{{.ServiceAddress}}{{else}}{{.Address}}{{end}}:{{.ServicePort}}/{{with .ServiceMeta.metrics_path}}{{.}}{{else}}metrics{{end}}' +# # [inputs.prometheus.consul.query.tags] +# # host = "{{.Node}}" +# # ## Use bearer token for authorization. ('bearer_token' takes priority) # # bearer_token = "/path/to/bearer/token" # ## OR @@ -7989,6 +8423,10 @@ # ## 1024. See README.md for details # ## # # service_address = "udp://:162" +# ## +# ## Path to mib files +# # path = ["/usr/share/snmp/mibs"] +# ## # ## Timeout running snmptranslate command # # timeout = "5s" # ## Snmp version, defaults to 2c @@ -8082,6 +8520,7 @@ # dsn = "username:password@mysqlserver:3307/dbname?param=value" # # ## Timeout for any operation +# ## Note that the timeout for queries is per query not per gather. # # timeout = "5s" # # ## Connection time limits @@ -8282,9 +8721,9 @@ # #max_ttl = "1000h" -# # Suricata stats plugin +# # Suricata stats and alerts plugin # [[inputs.suricata]] -# ## Data sink for Suricata stats log +# ## Data sink for Suricata stats and alerts logs # # This is expected to be a filename of a # # unix socket to be created for listening. # source = "/var/run/suricata-stats.sock" @@ -8292,6 +8731,9 @@ # # Delimiter for flattening field keys, e.g. subitem "alert" of "detect" # # becomes "detect_alert" when delimiter is "_". 
# delimiter = "_" +# +# ## Detect alert logs +# # alerts = false # # Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587 @@ -8336,6 +8778,11 @@ # ## By default best effort parsing is off. # # best_effort = false # +# ## The RFC standard to use for message parsing +# ## By default RFC5424 is used. RFC3164 only supports UDP transport (no streaming support) +# ## Must be one of "RFC5424", or "RFC3164". +# # syslog_standard = "RFC5424" +# # ## Character to prepend to SD-PARAMs (default = "_"). # ## A syslog message can contain multiple parameters and multiple identifiers within structured data section. # ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"] From 890508431c2692db8e5389461dc88c7575ea0873 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Thu, 2 Sep 2021 14:24:26 -0700 Subject: [PATCH 037/176] docs: fix influxdb output readme (#9708) --- plugins/outputs/influxdb/README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/plugins/outputs/influxdb/README.md b/plugins/outputs/influxdb/README.md index cd1b36a723aeb..36fde827e176a 100644 --- a/plugins/outputs/influxdb/README.md +++ b/plugins/outputs/influxdb/README.md @@ -85,8 +85,7 @@ The InfluxDB output plugin writes metrics to the [InfluxDB v1.x] HTTP or UDP ser ``` ### Metrics - Reference the [influx serializer][] for details about metric production. 
- + [InfluxDB v1.x]: https://github.com/influxdata/influxdb [influx serializer]: /plugins/serializers/influx/README.md#Metrics From 0317d7c3db0d6f8da5f041dc1a4f83fa772bdb41 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mariusz=20Bia=C5=82o=C5=84czyk?= Date: Tue, 7 Sep 2021 19:41:07 +0200 Subject: [PATCH 038/176] fix (inputs.ping): change edit to restart in README.md (#9728) --- plugins/inputs/ping/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/ping/README.md b/plugins/inputs/ping/README.md index 5829d6bd07283..10744a9b15e99 100644 --- a/plugins/inputs/ping/README.md +++ b/plugins/inputs/ping/README.md @@ -100,7 +100,7 @@ LimitNOFILE=8192 Restart Telegraf: ```sh -$ systemctl edit telegraf +$ systemctl restart telegraf ``` #### Linux Permissions From 147e3d13891070015812029e7523a76be0a4c113 Mon Sep 17 00:00:00 2001 From: alrex Date: Tue, 7 Sep 2021 14:15:13 -0700 Subject: [PATCH 039/176] fix: outputs.opentelemetry use attributes setting (#9588) --- plugins/outputs/opentelemetry/opentelemetry.go | 8 ++++++++ plugins/outputs/opentelemetry/opentelemetry_test.go | 2 ++ 2 files changed, 10 insertions(+) diff --git a/plugins/outputs/opentelemetry/opentelemetry.go b/plugins/outputs/opentelemetry/opentelemetry.go index a25fe2ff8dae8..874eaba819418 100644 --- a/plugins/outputs/opentelemetry/opentelemetry.go +++ b/plugins/outputs/opentelemetry/opentelemetry.go @@ -160,6 +160,14 @@ func (o *OpenTelemetry) Write(metrics []telegraf.Metric) error { return nil } + if len(o.Attributes) > 0 { + for i := 0; i < md.ResourceMetrics().Len(); i++ { + for k, v := range o.Attributes { + md.ResourceMetrics().At(i).Resource().Attributes().UpsertString(k, v) + } + } + } + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(o.Timeout)) if len(o.Headers) > 0 { diff --git a/plugins/outputs/opentelemetry/opentelemetry_test.go b/plugins/outputs/opentelemetry/opentelemetry_test.go index cfafcd47fc10a..4ba3adbbb07d0 100644 --- 
a/plugins/outputs/opentelemetry/opentelemetry_test.go +++ b/plugins/outputs/opentelemetry/opentelemetry_test.go @@ -26,6 +26,7 @@ func TestOpenTelemetry(t *testing.T) { { rm := expect.ResourceMetrics().AppendEmpty() rm.Resource().Attributes().InsertString("host.name", "potato") + rm.Resource().Attributes().InsertString("attr-key", "attr-val") ilm := rm.InstrumentationLibraryMetrics().AppendEmpty() ilm.InstrumentationLibrary().SetName("My Library Name") m := ilm.Metrics().AppendEmpty() @@ -45,6 +46,7 @@ func TestOpenTelemetry(t *testing.T) { ServiceAddress: m.Address(), Timeout: config.Duration(time.Second), Headers: map[string]string{"test": "header1"}, + Attributes: map[string]string{"attr-key": "attr-val"}, metricsConverter: metricsConverter, grpcClientConn: m.GrpcClient(), metricsServiceClient: otlpgrpc.NewMetricsClient(m.GrpcClient()), From ba1484cb75c9d0d820d843aab917bb9d349a84b0 Mon Sep 17 00:00:00 2001 From: Alexander Krantz Date: Tue, 7 Sep 2021 14:25:30 -0700 Subject: [PATCH 040/176] Fix memory leak in couchbase input (#9544) --- plugins/inputs/couchbase/couchbase.go | 36 ++++++++++---------- plugins/inputs/couchbase/couchbase_test.go | 38 +++++++++------------- 2 files changed, 33 insertions(+), 41 deletions(-) diff --git a/plugins/inputs/couchbase/couchbase.go b/plugins/inputs/couchbase/couchbase.go index b62a7e970305d..7b99c76e6982c 100644 --- a/plugins/inputs/couchbase/couchbase.go +++ b/plugins/inputs/couchbase/couchbase.go @@ -53,7 +53,7 @@ func (cb *Couchbase) Description() string { // Returns one of the errors encountered while gathering stats (if any). 
func (cb *Couchbase) Gather(acc telegraf.Accumulator) error { if len(cb.Servers) == 0 { - return cb.gatherServer(acc, "http://localhost:8091/", nil) + return cb.gatherServer(acc, "http://localhost:8091/") } var wg sync.WaitGroup @@ -61,7 +61,7 @@ func (cb *Couchbase) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func(serv string) { defer wg.Done() - acc.AddError(cb.gatherServer(acc, serv, nil)) + acc.AddError(cb.gatherServer(acc, serv)) }(serv) } @@ -70,26 +70,26 @@ func (cb *Couchbase) Gather(acc telegraf.Accumulator) error { return nil } -func (cb *Couchbase) gatherServer(acc telegraf.Accumulator, addr string, pool *couchbaseClient.Pool) error { - if pool == nil { - client, err := couchbaseClient.Connect(addr) - if err != nil { - return err - } +func (cb *Couchbase) gatherServer(acc telegraf.Accumulator, addr string) error { + escapedAddr := regexpURI.ReplaceAllString(addr, "${1}") - // `default` is the only possible pool name. It's a - // placeholder for a possible future Couchbase feature. See - // http://stackoverflow.com/a/16990911/17498. - p, err := client.GetPool("default") - if err != nil { - return err - } - pool = &p + client, err := couchbaseClient.Connect(addr) + if err != nil { + return err + } + + // `default` is the only possible pool name. It's a + // placeholder for a possible future Couchbase feature. See + // http://stackoverflow.com/a/16990911/17498. 
+ pool, err := client.GetPool("default") + if err != nil { + return err } + defer pool.Close() for i := 0; i < len(pool.Nodes); i++ { node := pool.Nodes[i] - tags := map[string]string{"cluster": regexpURI.ReplaceAllString(addr, "${1}"), "hostname": node.Hostname} + tags := map[string]string{"cluster": escapedAddr, "hostname": node.Hostname} fields := make(map[string]interface{}) fields["memory_free"] = node.MemoryFree fields["memory_total"] = node.MemoryTotal @@ -97,7 +97,7 @@ func (cb *Couchbase) gatherServer(acc telegraf.Accumulator, addr string, pool *c } for bucketName := range pool.BucketMap { - tags := map[string]string{"cluster": regexpURI.ReplaceAllString(addr, "${1}"), "bucket": bucketName} + tags := map[string]string{"cluster": escapedAddr, "bucket": bucketName} bs := pool.BucketMap[bucketName].BasicStats fields := make(map[string]interface{}) cb.addBucketField(fields, "quota_percent_used", bs["quotaPercentUsed"]) diff --git a/plugins/inputs/couchbase/couchbase_test.go b/plugins/inputs/couchbase/couchbase_test.go index d8f6aa3ac3ad1..a739732458a51 100644 --- a/plugins/inputs/couchbase/couchbase_test.go +++ b/plugins/inputs/couchbase/couchbase_test.go @@ -8,42 +8,31 @@ import ( "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" - - "github.com/couchbase/go-couchbase" ) func TestGatherServer(t *testing.T) { bucket := "blastro-df" fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/pools/default/buckets/"+bucket+"/stats" { + if r.URL.Path == "/pools" { + _, _ = w.Write([]byte(poolsResponse)) + } else if r.URL.Path == "/pools/default" { + _, _ = w.Write([]byte(poolsDefaultResponse)) + } else if r.URL.Path == "/pools/default/buckets" { + _, _ = w.Write([]byte(bucketsResponse)) + } else if r.URL.Path == "/pools/default/buckets/"+bucket+"/stats" { _, _ = w.Write([]byte(bucketStatsResponse)) } else { w.WriteHeader(http.StatusNotFound) } })) - var pool couchbase.Pool - 
var err error - if err := json.Unmarshal([]byte(poolsDefaultResponse), &pool); err != nil { - t.Fatal("parse poolsDefaultResponse", err) - } - - if err := json.Unmarshal([]byte(bucketResponse), &pool.BucketMap); err != nil { - t.Fatal("parse bucketResponse", err) - } - - bucketStats := &BucketStats{} - if err := json.Unmarshal([]byte(bucketStatsResponse), bucketStats); err != nil { - t.Fatal("parse bucketResponse", err) - } - var cb Couchbase cb.BucketStatsIncluded = []string{"quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"} - err = cb.Init() + err := cb.Init() require.NoError(t, err) var acc testutil.Accumulator - err = cb.gatherServer(&acc, fakeServer.URL, &pool) + err = cb.gatherServer(&acc, fakeServer.URL) require.NoError(t, err) acc.AssertContainsTaggedFields(t, "couchbase_node", map[string]interface{}{"memory_free": 23181365248.0, "memory_total": 64424656896.0}, @@ -137,11 +126,14 @@ func TestGatherDetailedBucketMetrics(t *testing.T) { } } +// From `/pools` +const poolsResponse string = `{"pools":[{"name":"default","uri":"/pools/default"}]}` + // From `/pools/default` on a real cluster -const poolsDefaultResponse string = `{"storageTotals":{"ram":{"total":450972598272,"quotaTotal":360777252864,"quotaUsed":360777252864,"used":446826622976,"usedByData":255061495696,"quotaUsedPerNode":51539607552,"quotaTotalPerNode":51539607552},"hdd":{"total":1108766539776,"quotaTotal":1108766539776,"used":559135126484,"usedByData":515767865143,"free":498944942902}},"serverGroupsUri":"/pools/default/serverGroups?v=98656394","name":"default","alerts":["Metadata overhead warning. Over 63% of RAM allocated to bucket \"blastro-df\" on node \"172.16.8.148\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.10.65\" is taken up by keys and metadata.","Metadata overhead warning. 
Over 64% of RAM allocated to bucket \"blastro-df\" on node \"172.16.13.173\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.15.75\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.13.105\" is taken up by keys and metadata.","Metadata overhead warning. Over 64% of RAM allocated to bucket \"blastro-df\" on node \"172.16.8.127\" is taken up by keys and metadata.","Metadata overhead warning. Over 63% of RAM allocated to bucket \"blastro-df\" on node \"172.16.15.120\" is taken up by keys and metadata.","Metadata overhead warning. Over 66% of RAM allocated to bucket \"blastro-df\" on node \"172.16.10.187\" is taken up by keys and metadata."],"alertsSilenceURL":"/controller/resetAlerts?token=2814&uuid=2bec87861652b990cf6aa5c7ee58c253","nodes":[{"systemStats":{"cpu_utilization_rate":35.43307086614173,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23181365248},"interestingStats":{"cmd_get":17.98201798201798,"couch_docs_actual_disk_size":68506048063,"couch_docs_data_size":38718796110,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140158886,"curr_items_tot":279374646,"ep_bg_fetched":0.999000999000999,"get_hits":10.98901098901099,"mem_used":36497390640,"ops":829.1708291708292,"vb_replica_curr_items":139215760},"uptime":"341236","memoryTotal":64424656896,"memoryFree":23181365248,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.10.187:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.187","thisNode":true,"hostname":"172.16.10.187:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":47.38255033557047,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free
":23665811456},"interestingStats":{"cmd_get":172.8271728271728,"couch_docs_actual_disk_size":79360565405,"couch_docs_data_size":38736382876,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140174377,"curr_items_tot":279383025,"ep_bg_fetched":0.999000999000999,"get_hits":167.8321678321678,"mem_used":36650059656,"ops":1685.314685314685,"vb_replica_curr_items":139208648},"uptime":"341210","memoryTotal":64424656896,"memoryFree":23665811456,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.10.65:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.65","hostname":"172.16.10.65:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":25.5586592178771,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23726600192},"interestingStats":{"cmd_get":63.06306306306306,"couch_docs_actual_disk_size":79345105217,"couch_docs_data_size":38728086130,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139195268,"curr_items_tot":279349113,"ep_bg_fetched":0,"get_hits":53.05305305305306,"mem_used":36476665576,"ops":1878.878878878879,"vb_replica_curr_items":140153845},"uptime":"341210","memoryTotal":64424656896,"memoryFree":23726600192,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.13.105:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.105","hostname":"172.16.13.105:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":26.45803698435277,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23854841856},"interestingStats":{"cmd_get":51.05105105105105,"couch_docs_actual_disk_size":74465931949,"couch_docs_d
ata_size":38723830730,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139209869,"curr_items_tot":279380019,"ep_bg_fetched":0,"get_hits":47.04704704704704,"mem_used":36471784896,"ops":1831.831831831832,"vb_replica_curr_items":140170150},"uptime":"340526","memoryTotal":64424656896,"memoryFree":23854841856,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.13.173:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.173","hostname":"172.16.13.173:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":47.31034482758621,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23773573120},"interestingStats":{"cmd_get":77.07707707707708,"couch_docs_actual_disk_size":74743093945,"couch_docs_data_size":38594660087,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139215932,"curr_items_tot":278427644,"ep_bg_fetched":0,"get_hits":53.05305305305305,"mem_used":36306500344,"ops":1981.981981981982,"vb_replica_curr_items":139211712},"uptime":"340495","memoryTotal":64424656896,"memoryFree":23773573120,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.15.120:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.15.120","hostname":"172.16.15.120:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":17.60660247592847,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23662190592},"interestingStats":{"cmd_get":146.8531468531468,"couch_docs_actual_disk_size":72932847344,"couch_docs_data_size":38581771457,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139226879,"curr_items_tot":278436540
,"ep_bg_fetched":0,"get_hits":144.8551448551448,"mem_used":36421860496,"ops":1495.504495504495,"vb_replica_curr_items":139209661},"uptime":"337174","memoryTotal":64424656896,"memoryFree":23662190592,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.8.127:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.127","hostname":"172.16.8.127:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":21.68831168831169,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":24049729536},"interestingStats":{"cmd_get":11.98801198801199,"couch_docs_actual_disk_size":66414273220,"couch_docs_data_size":38587642702,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139193759,"curr_items_tot":278398926,"ep_bg_fetched":0,"get_hits":9.990009990009991,"mem_used":36237234088,"ops":883.1168831168832,"vb_replica_curr_items":139205167},"uptime":"341228","memoryTotal":64424656896,"memoryFree":24049729536,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.8.148:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.148","hostname":"172.16.8.148:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}}],"buckets":{"uri":"/pools/default/buckets?v=74117050&uuid=2bec87861652b990cf6aa5c7ee58c253","terseBucketsBase":"/pools/default/b/","terseStreamingBucketsBase":"/pools/default/bs/"},"remoteClusters":{"uri":"/pools/default/remoteClusters?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/pools/default/remoteClusters?just_validate=1"},"controllers":{"addNode":{"uri":"/controller/addNode?uuid=2bec87861652b990cf6aa5c7ee58c253"},"rebalance":{"uri":"/controller/rebalance?uuid=2bec87861652b990cf6aa5c7e
e58c253"},"failOver":{"uri":"/controller/failOver?uuid=2bec87861652b990cf6aa5c7ee58c253"},"startGracefulFailover":{"uri":"/controller/startGracefulFailover?uuid=2bec87861652b990cf6aa5c7ee58c253"},"reAddNode":{"uri":"/controller/reAddNode?uuid=2bec87861652b990cf6aa5c7ee58c253"},"reFailOver":{"uri":"/controller/reFailOver?uuid=2bec87861652b990cf6aa5c7ee58c253"},"ejectNode":{"uri":"/controller/ejectNode?uuid=2bec87861652b990cf6aa5c7ee58c253"},"setRecoveryType":{"uri":"/controller/setRecoveryType?uuid=2bec87861652b990cf6aa5c7ee58c253"},"setAutoCompaction":{"uri":"/controller/setAutoCompaction?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/controller/setAutoCompaction?just_validate=1"},"clusterLogsCollection":{"startURI":"/controller/startLogsCollection?uuid=2bec87861652b990cf6aa5c7ee58c253","cancelURI":"/controller/cancelLogsCollection?uuid=2bec87861652b990cf6aa5c7ee58c253"},"replication":{"createURI":"/controller/createReplication?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/controller/createReplication?just_validate=1"},"setFastWarmup":{"uri":"/controller/setFastWarmup?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/controller/setFastWarmup?just_validate=1"}},"rebalanceStatus":"none","rebalanceProgressUri":"/pools/default/rebalanceProgress","stopRebalanceUri":"/controller/stopRebalance?uuid=2bec87861652b990cf6aa5c7ee58c253","nodeStatusesUri":"/nodeStatuses","maxBucketCount":10,"autoCompactionSettings":{"parallelDBAndViewCompaction":false,"databaseFragmentationThreshold":{"percentage":50,"size":"undefined"},"viewFragmentationThreshold":{"percentage":50,"size":"undefined"}},"fastWarmupSettings":{"fastWarmupEnabled":true,"minMemoryThreshold":10,"minItemsThreshold":10},"tasks":{"uri":"/pools/default/tasks?v=97479372"},"visualSettingsUri":"/internalSettings/visual?v=7111573","counters":{"rebalance_success":4,"rebalance_start":6,"rebalance_stop":2}}` +const poolsDefaultResponse string = 
`{"storageTotals":{"ram":{"total":450972598272,"quotaTotal":360777252864,"quotaUsed":360777252864,"used":446826622976,"usedByData":255061495696,"quotaUsedPerNode":51539607552,"quotaTotalPerNode":51539607552},"hdd":{"total":1108766539776,"quotaTotal":1108766539776,"used":559135126484,"usedByData":515767865143,"free":498944942902}},"serverGroupsUri":"/pools/default/serverGroups","name":"default","alerts":["Metadata overhead warning. Over 63% of RAM allocated to bucket \"blastro-df\" on node \"172.16.8.148\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.10.65\" is taken up by keys and metadata.","Metadata overhead warning. Over 64% of RAM allocated to bucket \"blastro-df\" on node \"172.16.13.173\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.15.75\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.13.105\" is taken up by keys and metadata.","Metadata overhead warning. Over 64% of RAM allocated to bucket \"blastro-df\" on node \"172.16.8.127\" is taken up by keys and metadata.","Metadata overhead warning. Over 63% of RAM allocated to bucket \"blastro-df\" on node \"172.16.15.120\" is taken up by keys and metadata.","Metadata overhead warning. 
Over 66% of RAM allocated to bucket \"blastro-df\" on node \"172.16.10.187\" is taken up by keys and metadata."],"alertsSilenceURL":"/controller/resetAlerts","nodes":[{"systemStats":{"cpu_utilization_rate":35.43307086614173,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23181365248},"interestingStats":{"cmd_get":17.98201798201798,"couch_docs_actual_disk_size":68506048063,"couch_docs_data_size":38718796110,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140158886,"curr_items_tot":279374646,"ep_bg_fetched":0.999000999000999,"get_hits":10.98901098901099,"mem_used":36497390640,"ops":829.1708291708292,"vb_replica_curr_items":139215760},"uptime":"341236","memoryTotal":64424656896,"memoryFree":23181365248,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.10.187:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.187","thisNode":true,"hostname":"172.16.10.187:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":47.38255033557047,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23665811456},"interestingStats":{"cmd_get":172.8271728271728,"couch_docs_actual_disk_size":79360565405,"couch_docs_data_size":38736382876,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140174377,"curr_items_tot":279383025,"ep_bg_fetched":0.999000999000999,"get_hits":167.8321678321678,"mem_used":36650059656,"ops":1685.314685314685,"vb_replica_curr_items":139208648},"uptime":"341210","memoryTotal":64424656896,"memoryFree":23665811456,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.10.65:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.65","hostname":"172.16.10.65:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-c
ommunity","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":25.5586592178771,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23726600192},"interestingStats":{"cmd_get":63.06306306306306,"couch_docs_actual_disk_size":79345105217,"couch_docs_data_size":38728086130,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139195268,"curr_items_tot":279349113,"ep_bg_fetched":0,"get_hits":53.05305305305306,"mem_used":36476665576,"ops":1878.878878878879,"vb_replica_curr_items":140153845},"uptime":"341210","memoryTotal":64424656896,"memoryFree":23726600192,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.13.105:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.105","hostname":"172.16.13.105:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":26.45803698435277,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23854841856},"interestingStats":{"cmd_get":51.05105105105105,"couch_docs_actual_disk_size":74465931949,"couch_docs_data_size":38723830730,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139209869,"curr_items_tot":279380019,"ep_bg_fetched":0,"get_hits":47.04704704704704,"mem_used":36471784896,"ops":1831.831831831832,"vb_replica_curr_items":140170150},"uptime":"340526","memoryTotal":64424656896,"memoryFree":23854841856,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.13.173:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.173","hostname":"172.16.13.173:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":47.3103448
2758621,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23773573120},"interestingStats":{"cmd_get":77.07707707707708,"couch_docs_actual_disk_size":74743093945,"couch_docs_data_size":38594660087,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139215932,"curr_items_tot":278427644,"ep_bg_fetched":0,"get_hits":53.05305305305305,"mem_used":36306500344,"ops":1981.981981981982,"vb_replica_curr_items":139211712},"uptime":"340495","memoryTotal":64424656896,"memoryFree":23773573120,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.15.120:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.15.120","hostname":"172.16.15.120:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":17.60660247592847,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23662190592},"interestingStats":{"cmd_get":146.8531468531468,"couch_docs_actual_disk_size":72932847344,"couch_docs_data_size":38581771457,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139226879,"curr_items_tot":278436540,"ep_bg_fetched":0,"get_hits":144.8551448551448,"mem_used":36421860496,"ops":1495.504495504495,"vb_replica_curr_items":139209661},"uptime":"337174","memoryTotal":64424656896,"memoryFree":23662190592,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.8.127:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.127","hostname":"172.16.8.127:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":21.68831168831169,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":24049729536},"interestingStats":{"cmd_get":11.98801198801199,
"couch_docs_actual_disk_size":66414273220,"couch_docs_data_size":38587642702,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139193759,"curr_items_tot":278398926,"ep_bg_fetched":0,"get_hits":9.990009990009991,"mem_used":36237234088,"ops":883.1168831168832,"vb_replica_curr_items":139205167},"uptime":"341228","memoryTotal":64424656896,"memoryFree":24049729536,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.8.148:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.148","hostname":"172.16.8.148:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}}],"buckets":{"uri":"/pools/default/buckets","terseBucketsBase":"/pools/default/b/","terseStreamingBucketsBase":"/pools/default/bs/"},"remoteClusters":{"uri":"/pools/default/remoteClusters","validateURI":"/pools/default/remoteClusters?just_validate=1"},"controllers":{"addNode":{"uri":"/controller/addNode"},"rebalance":{"uri":"/controller/rebalance"},"failOver":{"uri":"/controller/failOver"},"startGracefulFailover":{"uri":"/controller/startGracefulFailover"},"reAddNode":{"uri":"/controller/reAddNode"},"reFailOver":{"uri":"/controller/reFailOver"},"ejectNode":{"uri":"/controller/ejectNode"},"setRecoveryType":{"uri":"/controller/setRecoveryType"},"setAutoCompaction":{"uri":"/controller/setAutoCompaction","validateURI":"/controller/setAutoCompaction?just_validate=1"},"clusterLogsCollection":{"startURI":"/controller/startLogsCollection","cancelURI":"/controller/cancelLogsCollection"},"replication":{"createURI":"/controller/createReplication","validateURI":"/controller/createReplication?just_validate=1"},"setFastWarmup":{"uri":"/controller/setFastWarmup","validateURI":"/controller/setFastWarmup?just_validate=1"}},"rebalanceStatus":"none","rebalanceProgressUri":"/pools/default/rebalanceProgress","stopRebalanceUri":"/controller/stopRebalanc
e","nodeStatusesUri":"/nodeStatuses","maxBucketCount":10,"autoCompactionSettings":{"parallelDBAndViewCompaction":false,"databaseFragmentationThreshold":{"percentage":50,"size":"undefined"},"viewFragmentationThreshold":{"percentage":50,"size":"undefined"}},"fastWarmupSettings":{"fastWarmupEnabled":true,"minMemoryThreshold":10,"minItemsThreshold":10},"tasks":{"uri":"/pools/default/tasks"},"visualSettingsUri":"/internalSettings/visual","counters":{"rebalance_success":4,"rebalance_start":6,"rebalance_stop":2}}` -// From `/pools/default/buckets/blastro-df` on a real cluster -const bucketResponse string = `{"blastro-df": {"name":"blastro-df","bucketType":"membase","authType":"sasl","saslPassword":"","proxyPort":0,"replicaIndex":false,"uri":"/pools/default/buckets/blastro-df?bucket_uuid=2e6b9dc4c278300ce3a4f27ad540323f","streamingUri":"/pools/default/bucketsStreaming/blastro-df?bucket_uuid=2e6b9dc4c278300ce3a4f27ad540323f","localRandomKeyUri":"/pools/default/buckets/blastro-df/localRandomKey","controllers":{"compactAll":"/pools/default/buckets/blastro-df/controller/compactBucket","compactDB":"/pools/default/buckets/default/controller/compactDatabases","purgeDeletes":"/pools/default/buckets/blastro-df/controller/unsafePurgeBucket","startRecovery":"/pools/default/buckets/blastro-df/controller/startRecovery"},"nodes":[{"couchApiBase":"http://172.16.8.148:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":18.39557399723375,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23791935488},"interestingStats":{"cmd_get":10.98901098901099,"couch_docs_actual_disk_size":79525832077,"couch_docs_data_size":38633186946,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139229304,"curr_items_tot":278470058,"ep_bg_fetched":0,"get_hits":5.994005994005994,"mem_used":36284362960,"ops":1275.724275724276,"vb_replica_curr_items":139240754},"uptime":"343968","memoryTotal":64424656896,"memoryFree":23791935488,"mcdMemoryReserv
ed":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.148","hostname":"172.16.8.148:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.8.127:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.97183098591549,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23533023232},"interestingStats":{"cmd_get":39.96003996003996,"couch_docs_actual_disk_size":63322357663,"couch_docs_data_size":38603481061,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139262616,"curr_items_tot":278508069,"ep_bg_fetched":0.999000999000999,"get_hits":30.96903096903097,"mem_used":36475078736,"ops":1370.629370629371,"vb_replica_curr_items":139245453},"uptime":"339914","memoryTotal":64424656896,"memoryFree":23533023232,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.127","hostname":"172.16.8.127:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.15.120:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":23.38028169014084,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23672963072},"interestingStats":{"cmd_get":88.08808808808809,"couch_docs_actual_disk_size":80260594761,"couch_docs_data_size":38632863189,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139251563,"curr_items_tot":278498913,"ep_bg_fetched":0,"get_hits":74.07407407407408,"mem_used":36348663000,"ops":1707.707707707708,"vb_replica_curr_items":139247350},"uptime":"343235","memoryTotal":64424656896,"memoryFree":23672963072,"mcdMemoryRes
erved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.15.120","hostname":"172.16.15.120:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.13.173:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":22.15988779803646,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23818825728},"interestingStats":{"cmd_get":103.1031031031031,"couch_docs_actual_disk_size":68247785524,"couch_docs_data_size":38747583467,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139245453,"curr_items_tot":279451313,"ep_bg_fetched":1.001001001001001,"get_hits":86.08608608608608,"mem_used":36524715864,"ops":1749.74974974975,"vb_replica_curr_items":140205860},"uptime":"343266","memoryTotal":64424656896,"memoryFree":23818825728,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.173","hostname":"172.16.13.173:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.13.105:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.94444444444444,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23721426944},"interestingStats":{"cmd_get":113.1131131131131,"couch_docs_actual_disk_size":68102832275,"couch_docs_data_size":38747477407,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139230887,"curr_items_tot":279420530,"ep_bg_fetched":0,"get_hits":106.1061061061061,"mem_used":36524887624,"ops":1799.7997997998,"vb_replica_curr_items":140189643},"uptime":"343950","memoryTotal":64424656896,"memoryFree":23721426944,"mcdMemo
ryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.105","hostname":"172.16.13.105:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.10.65:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":60.62176165803109,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23618203648},"interestingStats":{"cmd_get":30.96903096903097,"couch_docs_actual_disk_size":69052175561,"couch_docs_data_size":38755695030,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140210194,"curr_items_tot":279454253,"ep_bg_fetched":0,"get_hits":26.97302697302698,"mem_used":36543072472,"ops":1337.662337662338,"vb_replica_curr_items":139244059},"uptime":"343950","memoryTotal":64424656896,"memoryFree":23618203648,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.65","hostname":"172.16.10.65:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.10.187:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.83588317107093,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23062269952},"interestingStats":{"cmd_get":33.03303303303304,"couch_docs_actual_disk_size":74422029546,"couch_docs_data_size":38758172837,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140194321,"curr_items_tot":279445526,"ep_bg_fetched":0,"get_hits":21.02102102102102,"mem_used":36527676832,"ops":1088.088088088088,"vb_replica_curr_items":139251205},"uptime":"343971","memoryTotal":64424656896,"memoryFree":23062269952,"mcdMemoryReserved"
:49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.187","thisNode":true,"hostname":"172.16.10.187:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}}],"stats":{"uri":"/pools/default/buckets/blastro-df/stats","directoryURI":"/pools/default/buckets/blastro-df/statsDirectory","nodeStatsListURI":"/pools/default/buckets/blastro-df/nodes"},"ddocs":{"uri":"/pools/default/buckets/blastro-df/ddocs"},"nodeLocator":"vbucket","fastWarmupSettings":false,"autoCompactionSettings":false,"uuid":"2e6b9dc4c278300ce3a4f27ad540323f","vBucketServerMap":{"hashAlgorithm":"CRC","numReplicas":1,"serverList":["172.16.10.187:11210","172.16.10.65:11210","172.16.13.105:11210","172.16.13.173:11210","172.16.15.120:11210","172.16.8.127:11210","172.16.8.148:11210"],"vBucketMap":[[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,6],[0,6],[0,6],[0,6],[0,6],[1,3],[1,3],[1,3],[1,4],[1,4],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[2,3],[2,3],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[2,5],[2,5],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,6],[3,6],[3,6],[3,6],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[5,3],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[1,0],[1,0],[1,0],[1,0],[1,0],
[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[0,3],[0,3],[0,3],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[4,5],[4,5],[4,5],[4,5],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[2,6],[2,6],[3,2],[3,2],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,5],[3,5],[3,5],[3,5],[2,0],[2,0],[2,0],[2,0],[2,0],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[4,2],[4,3],[4,3],[4,3],[4,5],[4,5],[4,5],[4,5],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[5,4],[5,4],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[6,5],[6,5],[6,5],[6,5],[6,5],[4,0],[4,0],[4,0],[4,0],[4,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[0,4],[0,4],[0,4],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,6],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[4,6],[4,6],[4,6],[4,6],[4,6],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[3,4],[3,4],[3,4],[3,5],[3,5],[3,5],[3,5],[5,0],[5,0],[5,0],[2,0],[2,0],[3,0],[3,0],[3,0],[5,3],[5,3],[5,3],[5,3],[5,3],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,5],[4,5],[1,0],[3,0],[3,1],[3,1],[3,1],[3,1],[5,4],[5,4],[5,4],[5,4],[5,4],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[5,6],[5,6],[5,6],[6,2],[6,2],[6,3],[6,3],[6,3],[4,0],[4,0],[4,0],[4,0],[4,0],[4,1],[4,1],[4,1],[5
,6],[5,6],[5,6],[5,6],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[0,5],[0,5],[0,5],[0,6],[0,6],[0,6],[0,6],[0,6],[0,1],[0,1],[4,6],[4,6],[4,6],[4,6],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,6],[2,0],[2,0],[5,2],[5,3],[5,3],[5,3],[5,3],[5,1],[5,1],[5,1],[5,1],[5,1],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[4,1],[4,1],[4,1],[5,3],[5,3],[5,3],[5,3],[5,3],[2,0],[5,2],[5,2],[5,2],[5,2],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,2],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[4,1],[4,1],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,1],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,2],[0,2],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[0,2],[0,2],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[3,1],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,1],[4,1],[4,2],[4,2],[4,2],[6,3],[6,3],[6,3],[6,3],[6,3],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[6,1],[6,1
],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,2],[6,2],[6,2],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[2,1],[2,3],[2,3],[1,2],[1,2],[1,2],[1,3],[1,3],[1,3],[1,3],[1,3],[3,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[3,1],[3,1],[3,1],[3,1],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[6,3],[6,3],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[5,1],[5,1],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[5,2],[6,2],[6,2],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,3],[1,3],[1,3],[6,2],[6,2],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[1,3],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,5],[6,5],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,2]]},"replicaNumber":1,"threadsNumber":3,"quota":{"ram":293601280000,"rawRAM":41943040000},"basicStats":{"quotaPercentUsed":68.85424936294555,"opsPerSec":5686.789686789687,"diskFetches":0,"itemCount":943239752,"diskUsed":409178772321,"dataUsed":212179309111,"memUsed":202156957464},"evictionPolicy":"valueOnly","bucketCapabilitiesVer":"","bucketCapabilities":["cbhello","touch","couchapi","cccp","xdcrCheckpointing","nodesExt"]}}` +// From `/pools/default/buckets` on a real cluster +const bucketsResponse string = 
`[{"name":"blastro-df","bucketType":"membase","authType":"sasl","saslPassword":"","proxyPort":0,"replicaIndex":false,"uri":"/pools/default/buckets/blastro-df?bucket_uuid=2e6b9dc4c278300ce3a4f27ad540323f","streamingUri":"/pools/default/bucketsStreaming/blastro-df?bucket_uuid=2e6b9dc4c278300ce3a4f27ad540323f","localRandomKeyUri":"/pools/default/buckets/blastro-df/localRandomKey","controllers":{"compactAll":"/pools/default/buckets/blastro-df/controller/compactBucket","compactDB":"/pools/default/buckets/default/controller/compactDatabases","purgeDeletes":"/pools/default/buckets/blastro-df/controller/unsafePurgeBucket","startRecovery":"/pools/default/buckets/blastro-df/controller/startRecovery"},"nodes":[{"couchApiBase":"http://172.16.8.148:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":18.39557399723375,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23791935488},"interestingStats":{"cmd_get":10.98901098901099,"couch_docs_actual_disk_size":79525832077,"couch_docs_data_size":38633186946,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139229304,"curr_items_tot":278470058,"ep_bg_fetched":0,"get_hits":5.994005994005994,"mem_used":36284362960,"ops":1275.724275724276,"vb_replica_curr_items":139240754},"uptime":"343968","memoryTotal":64424656896,"memoryFree":23791935488,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.148","hostname":"172.16.8.148:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.8.127:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.97183098591549,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23533023232},"interestingStats":{"cmd_get":39.96003996003996,"couch_docs_actual_disk_size
":63322357663,"couch_docs_data_size":38603481061,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139262616,"curr_items_tot":278508069,"ep_bg_fetched":0.999000999000999,"get_hits":30.96903096903097,"mem_used":36475078736,"ops":1370.629370629371,"vb_replica_curr_items":139245453},"uptime":"339914","memoryTotal":64424656896,"memoryFree":23533023232,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.127","hostname":"172.16.8.127:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.15.120:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":23.38028169014084,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23672963072},"interestingStats":{"cmd_get":88.08808808808809,"couch_docs_actual_disk_size":80260594761,"couch_docs_data_size":38632863189,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139251563,"curr_items_tot":278498913,"ep_bg_fetched":0,"get_hits":74.07407407407408,"mem_used":36348663000,"ops":1707.707707707708,"vb_replica_curr_items":139247350},"uptime":"343235","memoryTotal":64424656896,"memoryFree":23672963072,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.15.120","hostname":"172.16.15.120:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.13.173:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":22.15988779803646,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23818825728},"interestingStats":{"cmd_get":103.1031031031031,"couch_docs_actual_dis
k_size":68247785524,"couch_docs_data_size":38747583467,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139245453,"curr_items_tot":279451313,"ep_bg_fetched":1.001001001001001,"get_hits":86.08608608608608,"mem_used":36524715864,"ops":1749.74974974975,"vb_replica_curr_items":140205860},"uptime":"343266","memoryTotal":64424656896,"memoryFree":23818825728,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.173","hostname":"172.16.13.173:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.13.105:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.94444444444444,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23721426944},"interestingStats":{"cmd_get":113.1131131131131,"couch_docs_actual_disk_size":68102832275,"couch_docs_data_size":38747477407,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139230887,"curr_items_tot":279420530,"ep_bg_fetched":0,"get_hits":106.1061061061061,"mem_used":36524887624,"ops":1799.7997997998,"vb_replica_curr_items":140189643},"uptime":"343950","memoryTotal":64424656896,"memoryFree":23721426944,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.105","hostname":"172.16.13.105:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.10.65:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":60.62176165803109,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23618203648},"interestingStats":{"cmd_get":30.96903096903097,"couch_docs_actual
_disk_size":69052175561,"couch_docs_data_size":38755695030,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140210194,"curr_items_tot":279454253,"ep_bg_fetched":0,"get_hits":26.97302697302698,"mem_used":36543072472,"ops":1337.662337662338,"vb_replica_curr_items":139244059},"uptime":"343950","memoryTotal":64424656896,"memoryFree":23618203648,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.65","hostname":"172.16.10.65:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.10.187:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.83588317107093,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23062269952},"interestingStats":{"cmd_get":33.03303303303304,"couch_docs_actual_disk_size":74422029546,"couch_docs_data_size":38758172837,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140194321,"curr_items_tot":279445526,"ep_bg_fetched":0,"get_hits":21.02102102102102,"mem_used":36527676832,"ops":1088.088088088088,"vb_replica_curr_items":139251205},"uptime":"343971","memoryTotal":64424656896,"memoryFree":23062269952,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.187","thisNode":true,"hostname":"172.16.10.187:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}}],"stats":{"uri":"/pools/default/buckets/blastro-df/stats","directoryURI":"/pools/default/buckets/blastro-df/statsDirectory","nodeStatsListURI":"/pools/default/buckets/blastro-df/nodes"},"ddocs":{"uri":"/pools/default/buckets/blastro-df/ddocs"},"nodeLocator":"vbucket","fastWarmupSett
ings":false,"autoCompactionSettings":false,"uuid":"2e6b9dc4c278300ce3a4f27ad540323f","vBucketServerMap":{"hashAlgorithm":"CRC","numReplicas":1,"serverList":["172.16.10.187:11210","172.16.10.65:11210","172.16.13.105:11210","172.16.13.173:11210","172.16.15.120:11210","172.16.8.127:11210","172.16.8.148:11210"],"vBucketMap":[[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,6],[0,6],[0,6],[0,6],[0,6],[1,3],[1,3],[1,3],[1,4],[1,4],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[2,3],[2,3],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[2,5],[2,5],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,6],[3,6],[3,6],[3,6],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[5,3],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[0,3],[0,3],[0,3],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[4,5],[4,5],[4,5],[4,5],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[2,6],[2,6],[3,2],[3,2],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,5],[3,5],[3,5],[3,5],[2,0],[2,0],[2,0],[2,0],[2,0],[1,4],[1,4],[1,4],[1,4],[1,4],[1,
4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[4,2],[4,3],[4,3],[4,3],[4,5],[4,5],[4,5],[4,5],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[5,4],[5,4],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[6,5],[6,5],[6,5],[6,5],[6,5],[4,0],[4,0],[4,0],[4,0],[4,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[0,4],[0,4],[0,4],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,6],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[4,6],[4,6],[4,6],[4,6],[4,6],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[3,4],[3,4],[3,4],[3,5],[3,5],[3,5],[3,5],[5,0],[5,0],[5,0],[2,0],[2,0],[3,0],[3,0],[3,0],[5,3],[5,3],[5,3],[5,3],[5,3],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,5],[4,5],[1,0],[3,0],[3,1],[3,1],[3,1],[3,1],[5,4],[5,4],[5,4],[5,4],[5,4],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[5,6],[5,6],[5,6],[6,2],[6,2],[6,3],[6,3],[6,3],[4,0],[4,0],[4,0],[4,0],[4,0],[4,1],[4,1],[4,1],[5,6],[5,6],[5,6],[5,6],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[0,5],[0,5],[0,5],[0,6],[0,6],[0,6],[0,6],[0,6],[0,1],[0,1],[4,6],[4,6],[4,6],[4,6],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,6],[2,0],[2,0],[5,2],[5,3],[5,3],[5,3],[5,3],[5,1],[5,1],[5,1],[5,1],[5,1],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[4,1],[4,1],[4,1],[5,3],[5,3],[5,3],[5,3],[5,3],[2,0],[5,2]
,[5,2],[5,2],[5,2],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,2],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[4,1],[4,1],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,1],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,2],[0,2],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[0,2],[0,2],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[3,1],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,1],[4,1],[4,2],[4,2],[4,2],[6,3],[6,3],[6,3],[6,3],[6,3],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,2],[6,2],[6,2],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[2,1],[2,3],[2,3],[1,2],[1,2],[1,2],[1,3],[1,3],[1,3],[1,3],[1,3],[3,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[3,1],[3,1],[3,1],[3,1],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[6,3],[6,3],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[5,1],[5,1],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[
6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[5,2],[6,2],[6,2],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,3],[1,3],[1,3],[6,2],[6,2],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[1,3],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,5],[6,5],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,2]]},"replicaNumber":1,"threadsNumber":3,"quota":{"ram":293601280000,"rawRAM":41943040000},"basicStats":{"quotaPercentUsed":68.85424936294555,"opsPerSec":5686.789686789687,"diskFetches":0,"itemCount":943239752,"diskUsed":409178772321,"dataUsed":212179309111,"memUsed":202156957464},"evictionPolicy":"valueOnly","bucketCapabilitiesVer":"","bucketCapabilities":["cbhello","touch","couchapi","cccp","xdcrCheckpointing","nodesExt"]}]` const bucketStatsResponse string = `{"op":{"samples":{"couch_total_disk_size":[559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341],"couch_docs_fragmentation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_views_fragmentation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"hit_ratio":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_cache_miss_rate":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_resident_items_rate":[100
,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100],"vb_avg_active_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_avg_replica_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_avg_pending_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_avg_total_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_resident_items_ratio":[100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100],"vb_replica_resident_items_ratio":[100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100],"vb_pending_resident_items_ratio":[100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100],"avg_disk_update_time":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"avg_disk_commit_time":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"avg_bg_wait_time":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"avg_active_timestamp_drift":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"avg_replica_timestamp_drift":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bg_wait_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bg_wait_total":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bytes_read":[118.1818181818182,142.2805247225025,180.8080808080808,197.7800201816347,141.9939577039275,118.5410334346505,142.4242424242424,148.4848484848485,197.3816717019134,202.4291497975709,118.0625630676085,142.4242424242424,
179.6165489404642,197.979797979798,142.4242424242424,118.1818181818182,142.2805247225025,148.4848484848485,197.979797979798,201.816347124117,118.1818181818182,142.4242424242424,148.4848484848485,197.7800201816347,142.4242424242424,118.1818181818182,142.2805247225025,179.7979797979798,197.1830985915493,202.6342451874367,118.1818181818182,142.2805247225025,180.4435483870968,198.3805668016194,142.2805247225025,118.1818181818182,142.2805247225025,148.4848484848485,197.979797979798,202.020202020202,118.0625630676085,118.1818181818182,204.040404040404,197.7800201816347,142.1370967741935,118.4210526315789,118.1818181818182,172.5529767911201,197.5806451612903,202.4291497975709,118.0625630676085,118.1818181818182,172.7272727272727,197.7800201816347,142.4242424242424,118.0625630676085,118.1818181818182,204.040404040404,197.979797979798,201.816347124117],"bytes_written":[36420.20202020202,37762.86579212916,37225.25252525252,50460.14127144299,37686.80765357502,36530.90172239109,37801.0101010101,37111.11111111111,50358.50956696878,60511.13360323886,36383.45105953582,37801.0101010101,37393.54187689203,50511.11111111111,37801.0101010101,36420.20202020202,37762.86579212916,37111.11111111111,50511.11111111111,60327.95156407669,36420.20202020202,37801.0101010101,37111.11111111111,50460.14127144299,37801.0101010101,36420.20202020202,37762.86579212916,37431.31313131313,50307.84708249497,60572.44174265451,36420.20202020202,37762.86579212916,37150.20161290323,50613.36032388664,37762.86579212916,36420.20202020202,37762.86579212916,37111.11111111111,50511.11111111111,60388.88888888889,36383.45105953582,36420.20202020202,38812.12121212122,50460.14127144299,37724.79838709677,36493.92712550607,36420.20202020202,38453.07769929364,50409.27419354839,60511.13360323886,36383.45105953582,36420.20202020202,38491.91919191919,50460.14127144299,37801.0101010101,36383.45105953582,36420.20202020202,38812.12121212122,50511.11111111111,60327.95156407669],"cas_badval":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cas_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cas_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cmd_get":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cmd_lookup":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cmd_set":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_docs_actual_disk_size":[559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341],"couch_docs_data_size":[531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373],"couch_docs_disk_size":[531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,
531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373],"couch_spatial_data_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_spatial_disk_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_spatial_ops":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_views_actual_disk_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_views_data_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_views_disk_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_views_ops":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"curr_connections":[14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14],"curr_items":[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],"curr_items_tot":[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],"decr_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"decr_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"delete_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"del
ete_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_commit_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_commit_total":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_update_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_update_total":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_write_queue":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_active_ahead_exceptions":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_active_hlc_drift":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_active_hlc_drift_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_bg_fetched":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_clock_cas_drift_threshold_exceeded":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_data_read_failed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_data_write_failed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0],"ep_dcp_2i_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_d
cp_replica_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_diskqueue_drain":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_diskqueue_fill":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_diskqueue_items":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_flusher_todo":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_item_commit_failed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_kv_size":[10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340],"ep_max_size":[8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032],"ep_
mem_high_wat":[7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627],"ep_mem_low_wat":[6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024],"ep_meta_data_memory":[68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68],"ep_num_non_resident":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_del_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_del_ret_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_n
um_ops_get_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_set_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_set_ret_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_value_ejects":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_oom_errors":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_ops_create":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_ops_update":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_overhead":[403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824],"ep_queue_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_replica_ahead_exceptions":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_replica_hlc_drift":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_replica_hlc_drift_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0],"ep_tmp_oom_errors":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_vb_total":[64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64],"evictions":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"get_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"get_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"incr_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"incr_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"mem_used":[4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016],"misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ops":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"timestamp":[1615918120012,1615918121003,1615918121993,1615918122984,1615918123977,1615918124964,1615918125954,1615918126944,1615918127937,1615918128925,1615918129916,1615918130906,1615918131897,1615918132887,1615918133877,1615
918134867,1615918135858,1615918136848,1615918137838,1615918138829,1615918139819,1615918140809,1615918141799,1615918142790,1615918143780,1615918144770,1615918145761,1615918146751,1615918147745,1615918148732,1615918149722,1615918150713,1615918151705,1615918152693,1615918153684,1615918154674,1615918155665,1615918156655,1615918157645,1615918158635,1615918159626,1615918160616,1615918161606,1615918162597,1615918163589,1615918164577,1615918165567,1615918166558,1615918167550,1615918168538,1615918169529,1615918170519,1615918171509,1615918172500,1615918173490,1615918174481,1615918175471,1615918176461,1615918177451,1615918178442],"vb_active_eject":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_itm_memory":[88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88],"vb_active_meta_data_memory":[68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68],"vb_active_num":[64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64],"vb_active_num_non_resident":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_ops_create":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_ops_update":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_queue_drain":[0,0,0,0,0
,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_queue_fill":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_queue_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_sync_write_aborted_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_sync_write_accepted_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_sync_write_committed_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_curr_items":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_eject":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_itm_memory":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_meta_data_memory":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_num":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_num_non_resident":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_ops_create":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_ops_update":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_queue_drain":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_queue_fill":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_queue_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_curr_items":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_eject":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_itm_memory":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_meta_data_memory":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_num":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_num_non_resident":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_ops_create":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_ops_update":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_queue_drain":[0,0,0,0,0,0,0,0,0,0,0,0,0,0
,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_queue_fill":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_queue_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_total_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"xdc_ops":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"allocstall":[18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615],"cpu_cores_available":[12,12,12,12,12,12,12,12,12,12,12,12,12,12
,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12],"cpu_irq_rate":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cpu_stolen_rate":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cpu_sys_rate":[4.942965779467681,5.243268776570619,6.823027718550106,4.815073272854153,4.853128991060026,5.068836045056321,4.983108108108108,4.110738255033557,3.201347935973041,3.959561920808762,3.610411418975651,3.459915611814346,3.691275167785235,4.553119730185498,6.470588235294118,4.545454545454546,5.046257359125315,5.976430976430977,5.564924114671164,3.703703703703704,3.529411764705882,3.544303797468354,3.826787512588117,5.118961788031723,7.166947723440135,5.87248322147651,4.289318755256518,5.485232067510548,4.765886287625418,4.672897196261682,4.184100418410042,4.560810810810811,7.02928870292887,6.081081081081081,5.378151260504202,6.239460370994941,8.984047019311502,6.896551724137931,9.636517328825022,9.335576114381833,7.64063811922754,8.684654300168635,6.543624161073826,6.465155331654072,5.961376994122586,3.807106598984772,3.36417157275021,3.700588730025231,3.775167785234899,9.45945945945946,3.114478114478115,3.451178451178451,4.465037910699242,3.852596314907873,3.462837837837838,5.205709487825357,5.218855218855219,6.532663316582915,5.885057471264368,4.030226700251889],"cpu_user_rate":[15.20912547528517,9.58904109589041,10.76759061833689,8.443824145150035,8.301404853128991,10.95118898623279,9.797297297297296,6.879194630872483,6.823925863521483,6.908171861836562,6.54911838790932,6.835443037974684,7.382550335570469,10.28667790893761,16.97478991596639,11.53198653198653,9.75609756097561,11.11111111111111,12.05733558178752,7.154882154882155,6.890756302521009,6.666666666666667,7.150050352467271,10.23792357606345,12.7318718381113,9.479865771812081,7.90580319
5962994,8.016877637130802,9.19732441471572,9.600679694137638,7.364016736401673,8.108108108108109,15.31380753138075,13.85135135135135,10.58823529411765,12.64755480607083,18.47187237615449,13.28847771236333,19.8647506339814,21.86711522287637,23.5936188077246,22.17537942664418,12.08053691275168,16.96053736356003,32.49370277078086,8.20642978003384,10.17661900756939,7.653490328006728,10.82214765100671,14.27364864864865,6.986531986531986,7.407407407407407,10.02527379949452,11.55778894472362,8.192567567567568,12.34256926952141,14.05723905723906,28.64321608040201,13.14942528735632,7.388748950461797],"cpu_utilization_rate":[20.15209125475285,14.83230987246103,17.59061833688699,13.25889741800419,13.15453384418902,16.02002503128911,14.78040540540541,10.98993288590604,10.02527379949452,10.86773378264532,10.15952980688497,10.29535864978903,11.0738255033557,14.8397976391231,23.4453781512605,16.07744107744108,14.80235492010092,17.08754208754209,17.62225969645868,10.85858585858586,10.42016806722689,10.21097046413502,10.97683786505539,15.35688536409517,19.89881956155143,15.35234899328859,12.19512195121951,13.50210970464135,13.96321070234114,14.27357689039932,11.54811715481171,12.66891891891892,22.34309623430962,19.93243243243243,15.96638655462185,18.88701517706577,27.45591939546599,20.18502943650126,29.50126796280642,31.2026913372582,31.23425692695214,30.86003372681282,18.6241610738255,23.42569269521411,38.45507976490345,12.01353637901861,13.5407905803196,11.35407905803196,14.59731543624161,23.73310810810811,10.1010101010101,10.85858585858586,14.49031171019377,15.41038525963149,11.65540540540541,17.54827875734677,19.27609427609428,35.17587939698493,19.03448275862069,11.41897565071369],"hibernated_requests":[7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7],"hibernated_waked":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"mem_actual_free":
[7004864512,6998364160,7056683008,7055605760,7059243008,7078457344,7079067648,7079514112,7078977536,7088099328,7091081216,7091773440,7091589120,7080108032,7073554432,7073914880,7080144896,7065124864,7063183360,7072677888,7073767424,7073542144,7073542144,7074902016,7053836288,7050895360,7055720448,7056822272,7057281024,7053025280,7052763136,7051984896,7049113600,7040618496,7045636096,7050907648,7021027328,7001329664,6985895936,6985895936,6955642880,7059750912,7058616320,7050067968,7047163904,7045873664,7050272768,7068528640,7073677312,7079116800,7078252544,7075880960,7065079808,7066251264,7065726976,7063486464,7064797184,7066206208,7068819456,7071809536],"mem_actual_used":[10175004672,10181505024,10123186176,10124263424,10120626176,10101411840,10100801536,10100355072,10100891648,10091769856,10088787968,10088095744,10088280064,10099761152,10106314752,10105954304,10099724288,10114744320,10116685824,10107191296,10106101760,10106327040,10106327040,10104967168,10126032896,10128973824,10124148736,10123046912,10122588160,10126843904,10127106048,10127884288,10130755584,10139250688,10134233088,10128961536,10158841856,10178539520,10193973248,10193973248,10224226304,10120118272,10121252864,10129801216,10132705280,10133995520,10129596416,10111340544,10106191872,10100752384,10101616640,10103988224,10114789376,10113617920,10114142208,10116382720,10115072000,10113662976,10111049728,10108059648],"mem_free":[7004864512,6998364160,7056683008,7055605760,7059243008,7078457344,7079067648,7079514112,7078977536,7088099328,7091081216,7091773440,7091589120,7080108032,7073554432,7073914880,7080144896,7065124864,7063183360,7072677888,7073767424,7073542144,7073542144,7074902016,7053836288,7050895360,7055720448,7056822272,7057281024,7053025280,7052763136,7051984896,7049113600,7040618496,7045636096,7050907648,7021027328,7001329664,6985895936,6985895936,6955642880,7059750912,7058616320,7050067968,7047163904,7045873664,7050272768,7068528640,7073677312,7079116800,7078252544,7075880960,7065079808,706
6251264,7065726976,7063486464,7064797184,7066206208,7068819456,7071809536],"mem_limit":[17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184],"mem_total":[17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184],"mem_used_sys":[16694517760,16707862528,16608030720,16610041856,16604663808,16553811968,16553463808,16553369600,16553861120,16539238400,16536092672,16535760896,16535707648,16553418752,16559439872,16558895104,16554569728,16580468736,16582680576,16565084160,16564649984,16565272576,16565272576,16562823168,16599863296,16602157056,16597528576,16596774912,16595107840,16593002496,16593485824,16596668416,16598691840,16607469568,16599904256,16590753792,166449479
68,16684613632,16714768384,16714768384,16781234176,16573353984,16575979520,16593072128,16603037696,16605077504,16599199744,16581554176,16570187776,16560140288,16561221632,16565153792,16577990656,16577200128,16578031616,16582909952,16569671680,16565702656,16560218112,16554315776],"odp_report_failed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"rest_requests":[2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,8,2,2,2,2,2,2,2,2,3,2,2,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,3,2,2,2,2,2],"swap_total":[1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824],"swap_used":[122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392]},"samplesCount":60,"isPersistent":true,"lastTStamp":1615918178442,"interval":1000},"hot_keys":[{"name":"first-duck","ops":6.003482019571351e-05}]}` 
const bucketStatsResponseWithMissing string = `{"op":{"samples":{"couch_total_disk_size":[559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341]},"samplesCount":60,"isPersistent":true,"lastTStamp":1615918178442,"interval":1000},"hot_keys":[{"name":"first-duck","ops":6.003482019571351e-05}]}` From 95ef67445668010841a6ed70140fded0b472cd94 Mon Sep 17 00:00:00 2001 From: Daniel Dyla Date: Wed, 8 Sep 2021 14:31:42 -0400 Subject: [PATCH 041/176] feat(dynatrace-output): remove special handling from counters (#9675) Co-authored-by: Armin Ruech --- go.mod | 2 +- go.sum | 4 +- plugins/outputs/dynatrace/README.md | 8 +- plugins/outputs/dynatrace/dynatrace.go | 24 +++-- plugins/outputs/dynatrace/dynatrace_test.go | 102 +++++++++----------- 5 files changed, 67 insertions(+), 73 deletions(-) diff --git a/go.mod b/go.mod index 8dd6c8f7a6fc4..c133b72dda361 100644 --- a/go.mod +++ b/go.mod @@ -85,7 +85,7 @@ require ( github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.4.0 // indirect github.com/doclambda/protobufquery v0.0.0-20210317203640-88ffabe06a60 - github.com/dynatrace-oss/dynatrace-metric-utils-go v0.2.0 + github.com/dynatrace-oss/dynatrace-metric-utils-go v0.3.0 github.com/eapache/go-resiliency v1.2.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect github.com/eapache/queue v1.1.0 // indirect diff --git a/go.sum b/go.sum index 1d373bad3ce34..01266f3e9cbf7 100644 --- a/go.sum +++ b/go.sum @@ -507,8 +507,8 @@ github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:Htrtb github.com/dustin/go-humanize v1.0.0 
h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dynatrace-oss/dynatrace-metric-utils-go v0.2.0 h1:TEG5Jj7RYM2JBCUH3nLqCmSZy6srnaefvXxjUTCuHyA= -github.com/dynatrace-oss/dynatrace-metric-utils-go v0.2.0/go.mod h1:qw0E9EJ0PnSlhWawDNuqE0zhc1hqOBUCFIAj3dd9DNw= +github.com/dynatrace-oss/dynatrace-metric-utils-go v0.3.0 h1:q2Ayh9s6Cr75bS5URiOUAoyFXemgKQaBJphbhAaJHCY= +github.com/dynatrace-oss/dynatrace-metric-utils-go v0.3.0/go.mod h1:qw0E9EJ0PnSlhWawDNuqE0zhc1hqOBUCFIAj3dd9DNw= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= diff --git a/plugins/outputs/dynatrace/README.md b/plugins/outputs/dynatrace/README.md index 666f821f6356c..f25b8708942d6 100644 --- a/plugins/outputs/dynatrace/README.md +++ b/plugins/outputs/dynatrace/README.md @@ -2,10 +2,12 @@ This plugin sends Telegraf metrics to [Dynatrace](https://www.dynatrace.com) via the [Dynatrace Metrics API V2](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/metric-v2/). It may be run alongside the Dynatrace OneAgent for automatic authentication or it may be run standalone on a host without a OneAgent by specifying a URL and API Token. More information on the plugin can be found in the [Dynatrace documentation](https://www.dynatrace.com/support/help/how-to-use-dynatrace/metrics/metric-ingestion/ingestion-methods/telegraf/). +All metrics are reported as gauges, unless they are specified to be delta counters using the `additional_counters` config option (see below). 
+See the [Dynatrace Metrics ingestion protocol documentation](https://www.dynatrace.com/support/help/how-to-use-dynatrace/metrics/metric-ingestion/metric-ingestion-protocol) for details on the types defined there. ## Requirements -You will either need a Dynatrace OneAgent (version 1.201 or higher) installed on the same host as Telegraf; or a Dynatrace environment with version 1.202 or higher. Monotonic counters (e.g. `diskio.reads`, `system.uptime`) require Dynatrace 208 or later. +You will either need a Dynatrace OneAgent (version 1.201 or higher) installed on the same host as Telegraf; or a Dynatrace environment with version 1.202 or higher. - Telegraf minimum version: Telegraf 1.16 @@ -65,7 +67,7 @@ You can learn more about how to use the Dynatrace API [here](https://www.dynatra prefix = "telegraf" ## Flag for skipping the tls certificate check, just for testing purposes, should be false by default insecure_skip_verify = false - ## If you want to convert values represented as gauges to counters, add the metric names here + ## If you want metrics to be treated and reported as delta counters, add the metric names here additional_counters = [ ] ## Optional dimensions to be added to every metric @@ -119,7 +121,7 @@ insecure_skip_verify = false *required*: `false` -If you want to convert values represented as gauges to counters, add the metric names here. +If you want a metric to be treated and reported as a delta counter, add its name to this list. ```toml additional_counters = [ ] diff --git a/plugins/outputs/dynatrace/dynatrace.go b/plugins/outputs/dynatrace/dynatrace.go index 470eb0e2cd0c6..11796e8e12994 100644 --- a/plugins/outputs/dynatrace/dynatrace.go +++ b/plugins/outputs/dynatrace/dynatrace.go @@ -69,7 +69,7 @@ const sampleConfig = ` ## Connection timeout, defaults to "5s" if not set. 
timeout = "5s" - ## If you want to convert values represented as gauges to counters, add the metric names here + ## If you want metrics to be treated and reported as delta counters, add the metric names here additional_counters = [ ] ## Optional dimensions to be added to every metric @@ -122,16 +122,10 @@ func (d *Dynatrace) Write(metrics []telegraf.Metric) error { dims = append(dims, dimensions.NewDimension(tag.Key, tag.Value)) } - metricType := tm.Type() for _, field := range tm.FieldList() { metricName := tm.Name() + "." + field.Key - for _, i := range d.AddCounterMetrics { - if metricName == i { - metricType = telegraf.Counter - } - } - typeOpt := getTypeOption(metricType, field) + typeOpt := d.getTypeOption(tm, field) if typeOpt == nil { // Unsupported type. Log only once per unsupported metric name @@ -267,15 +261,19 @@ func init() { }) } -func getTypeOption(metricType telegraf.ValueType, field *telegraf.Field) dtMetric.MetricOption { - if metricType == telegraf.Counter { +func (d *Dynatrace) getTypeOption(metric telegraf.Metric, field *telegraf.Field) dtMetric.MetricOption { + metricName := metric.Name() + "." 
+ field.Key + for _, i := range d.AddCounterMetrics { + if metricName != i { + continue + } switch v := field.Value.(type) { case float64: - return dtMetric.WithFloatCounterValueTotal(v) + return dtMetric.WithFloatCounterValueDelta(v) case uint64: - return dtMetric.WithIntCounterValueTotal(int64(v)) + return dtMetric.WithIntCounterValueDelta(int64(v)) case int64: - return dtMetric.WithIntCounterValueTotal(v) + return dtMetric.WithIntCounterValueDelta(v) default: return nil } diff --git a/plugins/outputs/dynatrace/dynatrace_test.go b/plugins/outputs/dynatrace/dynatrace_test.go index 65cd3d2a86f0a..c3cb091cbf549 100644 --- a/plugins/outputs/dynatrace/dynatrace_test.go +++ b/plugins/outputs/dynatrace/dynatrace_test.go @@ -2,10 +2,13 @@ package dynatrace import ( "encoding/json" + "fmt" "io/ioutil" "net/http" "net/http/httptest" "regexp" + "sort" + "strings" "testing" "time" @@ -123,26 +126,37 @@ func TestMissingAPIToken(t *testing.T) { } func TestSendMetrics(t *testing.T) { + expected := []string{} + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // check the encoded result bodyBytes, err := ioutil.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) - expected := "mymeasurement.myfield,dt.metrics.source=telegraf gauge,3.14 1289430000000\nmymeasurement.value,dt.metrics.source=telegraf count,3.14 1289430000000" - if bodyString != expected { - t.Errorf("Metric encoding failed. expected: %#v but got: %#v", expected, bodyString) + + lines := strings.Split(bodyString, "\n") + + sort.Strings(lines) + sort.Strings(expected) + + expectedString := strings.Join(expected, "\n") + foundString := strings.Join(lines, "\n") + if foundString != expectedString { + t.Errorf("Metric encoding failed. 
expected: %#v but got: %#v", expectedString, foundString) } w.WriteHeader(http.StatusOK) - err = json.NewEncoder(w).Encode(`{"linesOk":10,"linesInvalid":0,"error":null}`) + err = json.NewEncoder(w).Encode(fmt.Sprintf(`{"linesOk":%d,"linesInvalid":0,"error":null}`, len(lines))) require.NoError(t, err) })) defer ts.Close() - d := &Dynatrace{} + d := &Dynatrace{ + URL: ts.URL, + APIToken: "123", + Log: testutil.Logger{}, + AddCounterMetrics: []string{}, + } - d.URL = ts.URL - d.APIToken = "123" - d.Log = testutil.Logger{} err := d.Init() require.NoError(t, err) err = d.Connect() @@ -150,22 +164,43 @@ func TestSendMetrics(t *testing.T) { // Init metrics + // Simple metrics are exported as a gauge unless in additional_counters + expected = append(expected, "simple_metric.value,dt.metrics.source=telegraf gauge,3.14 1289430000000") + expected = append(expected, "simple_metric.counter,dt.metrics.source=telegraf count,delta=5 1289430000000") + d.AddCounterMetrics = append(d.AddCounterMetrics, "simple_metric.counter") m1 := metric.New( - "mymeasurement", + "simple_metric", map[string]string{}, - map[string]interface{}{"myfield": float64(3.14)}, + map[string]interface{}{"value": float64(3.14), "counter": 5}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) + // Even if Type() returns counter, all metrics are treated as a gauge unless explicitly added to additional_counters + expected = append(expected, "counter_type.value,dt.metrics.source=telegraf gauge,3.14 1289430000000") + expected = append(expected, "counter_type.counter,dt.metrics.source=telegraf count,delta=5 1289430000000") + d.AddCounterMetrics = append(d.AddCounterMetrics, "counter_type.counter") m2 := metric.New( - "mymeasurement", + "counter_type", map[string]string{}, - map[string]interface{}{"value": float64(3.14)}, + map[string]interface{}{"value": float64(3.14), "counter": 5}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), telegraf.Counter, ) - metrics := []telegraf.Metric{m1, m2} + 
expected = append(expected, "complex_metric.int,dt.metrics.source=telegraf gauge,1 1289430000000") + expected = append(expected, "complex_metric.int64,dt.metrics.source=telegraf gauge,2 1289430000000") + expected = append(expected, "complex_metric.float,dt.metrics.source=telegraf gauge,3 1289430000000") + expected = append(expected, "complex_metric.float64,dt.metrics.source=telegraf gauge,4 1289430000000") + expected = append(expected, "complex_metric.true,dt.metrics.source=telegraf gauge,1 1289430000000") + expected = append(expected, "complex_metric.false,dt.metrics.source=telegraf gauge,0 1289430000000") + m3 := metric.New( + "complex_metric", + map[string]string{}, + map[string]interface{}{"int": 1, "int64": int64(2), "float": 3.0, "float64": float64(4.0), "true": true, "false": false}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + metrics := []telegraf.Metric{m1, m2, m3} err = d.Write(metrics) require.NoError(t, err) @@ -475,47 +510,6 @@ func TestStaticDimensionsOverrideMetric(t *testing.T) { require.NoError(t, err) } -func TestSendCounterMetricWithoutTags(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) - require.NoError(t, err) - bodyString := string(bodyBytes) - expected := "mymeasurement.value,dt.metrics.source=telegraf gauge,32 1289430000000" - if bodyString != expected { - t.Errorf("Metric encoding failed. 
expected: %#v but got: %#v", expected, bodyString) - } - err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) - require.NoError(t, err) - })) - defer ts.Close() - - d := &Dynatrace{} - - d.URL = ts.URL - d.APIToken = "123" - d.Log = testutil.Logger{} - err := d.Init() - require.NoError(t, err) - err = d.Connect() - require.NoError(t, err) - - // Init metrics - - m1 := metric.New( - "mymeasurement", - map[string]string{}, - map[string]interface{}{"value": 32}, - time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), - ) - - metrics := []telegraf.Metric{m1} - - err = d.Write(metrics) - require.NoError(t, err) -} - var warnfCalledTimes int type loggerStub struct { From 7ca2ba7f9ec83e7ba7e03712719692f327323983 Mon Sep 17 00:00:00 2001 From: Samantha Wang Date: Thu, 9 Sep 2021 12:37:50 -0700 Subject: [PATCH 042/176] Revert "Merge branch 'master' into master" This reverts commit 98d061753a3e22077eac8ddf946444863f238030, reversing changes made to 95ef67445668010841a6ed70140fded0b472cd94. --- CONTRIBUTING.md | 8 ++++++ docs/EXTERNAL_PLUGINS.md | 58 +++++++++++++++++++++++++++++++++++++++- 2 files changed, 65 insertions(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 19650f52f9fd9..525a752714e84 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -14,6 +14,14 @@ 5. Open a new [pull request][]. 6. The pull request title needs to follow [conventional commit format](https://www.conventionalcommits.org/en/v1.0.0/#summary) +**Note:** If you have a pull request with only one commit, then that commit needs to follow the conventional commit format or the `Semantic Pull Request` check will fail. This is because github will use the pull request title if there are multiple commits, but if there is only one commit it will use it instead. 
+ +#### Contributing an External Plugin *(new)* + +Input, output, and processor plugins written for internal Telegraf can be run as externally-compiled plugins through the [Execd Input](/plugins/inputs/execd), [Execd Output](/plugins/outputs/execd), and [Execd Processor](/plugins/processors/execd) Plugins without having to change the plugin code. + +Follow the guidelines of how to integrate your plugin with the [Execd Go Shim](/plugins/common/shim) to easily compile it as a separate app and run it with the respective `execd` plugin. +Check out our [guidelines](/docs/EXTERNAL_PLUGINS.md#external-plugin-guidelines) on how to build and set up your external plugins to run with `execd`. #### Security Vulnerability Reporting InfluxData takes security and our users' trust very seriously. If you believe you have found a security issue in any of our diff --git a/docs/EXTERNAL_PLUGINS.md b/docs/EXTERNAL_PLUGINS.md index 728145c180ea6..83759ed72bb63 100644 --- a/docs/EXTERNAL_PLUGINS.md +++ b/docs/EXTERNAL_PLUGINS.md @@ -1,13 +1,69 @@ ### External Plugins - +[External plugins](/EXTERNAL_PLUGINS.md) are external programs that are built outside +of Telegraf that can run through an `execd` plugin. These external plugins allow for +more flexibility compared to internal Telegraf plugins. - External plugins can be written in any language (internal Telegraf plugins can only written in Go) - External plugins can access to libraries not written in Go - Utilize licensed software that isn't available to the open source community - Can include large dependencies that would otherwise bloat Telegraf +- You don't need to wait on the Telegraf team to publish your plugin and start working with it. 
+- using the [shim](/plugins/common/shim) you can easily convert plugins between internal and external use + +### External Plugin Guidelines +The guidelines of writing external plugins would follow those for our general [input](/docs/INPUTS.md), +[output](/docs/OUTPUTS.md), [processor](/docs/PROCESSORS.md), and [aggregator](/docs/AGGREGATORS.md) plugins. +Please reference the documentation on how to create these plugins written in Go. +_For listed [external plugins](/EXTERNAL_PLUGINS.md), the author of the external plugin is also responsible for the maintenance +and feature development of external plugins. Expect to have users open plugin issues on its respective GitHub repository._ + +#### Execd Go Shim +For Go plugins, there is a [Execd Go Shim](/plugins/common/shim/) that will make it trivial to extract an internal input, processor, or output plugin from the main Telegraf repo out to a stand-alone repo. This shim allows anyone to build and run it as a separate app using one of the `execd`plugins: - [inputs.execd](/plugins/inputs/execd) - [processors.execd](/plugins/processors/execd) - [outputs.execd](/plugins/outputs/execd) +Follow the [Steps to externalize a plugin](/plugins/common/shim#steps-to-externalize-a-plugin) and [Steps to build and run your plugin](/plugins/common/shim#steps-to-build-and-run-your-plugin) to properly with the Execd Go Shim + +#### Step-by-Step guidelines +This is a guide to help you set up your plugin to use it with `execd` +1. Write your Telegraf plugin. Depending on the plugin, follow the guidelines on how to create the plugin itself using InfluxData's best practices: + - [Input Plugins](/docs/INPUTS.md) + - [Processor Plugins](/docs/PROCESSORS.md) + - [Aggregator Plugins](/docs/AGGREGATORS.md) + - [Output Plugins](/docs/OUTPUTS.md) +2. If your plugin is written in Go, include the steps for the [Execd Go Shim](/plugins/common/shim#steps-to-build-and-run-your-plugin) + 1. 
Move the project to an external repo, it's recommended to preserve the path + structure, (but not strictly necessary). eg if your plugin was at + `plugins/inputs/cpu`, it's recommended that it also be under `plugins/inputs/cpu` + in the new repo. For a further example of what this might look like, take a + look at [ssoroka/rand](https://github.com/ssoroka/rand) or + [danielnelson/telegraf-execd-openvpn](https://github.com/danielnelson//telegraf-execd-openvpn) + 1. Copy [main.go](/plugins/common/shim/example/cmd/main.go) into your project under the `cmd` folder. + This will be the entrypoint to the plugin when run as a stand-alone program, and + it will call the shim code for you to make that happen. It's recommended to + have only one plugin per repo, as the shim is not designed to run multiple + plugins at the same time (it would vastly complicate things). + 1. Edit the main.go file to import your plugin. Within Telegraf this would have + been done in an all.go file, but here we don't split the two apart, and the change + just goes in the top of main.go. If you skip this step, your plugin will do nothing. + eg: `_ "github.com/me/my-plugin-telegraf/plugins/inputs/cpu"` + 1. Optionally add a [plugin.conf](./example/cmd/plugin.conf) for configuration + specific to your plugin. Note that this config file **must be separate from the + rest of the config for Telegraf, and must not be in a shared directory where + Telegraf is expecting to load all configs**. If Telegraf reads this config file + it will not know which plugin it relates to. Telegraf instead uses an execd config + block to look for this plugin. + 1. Add usage and development instructions in the homepage of your repository for running + your plugin with its respective `execd` plugin. Please refer to + [openvpn](https://github.com/danielnelson/telegraf-execd-openvpn#usage) and [awsalarms](https://github.com/vipinvkmenon/awsalarms#installation) + for examples. Include the following steps: + 1. 
How to download the release package for your platform or how to clone the binary for your external plugin + 1. The commands to build your binary + 1. Location to edit your `telegraf.conf` + 1. Configuration to run your external plugin with [inputs.execd](/plugins/inputs/execd), + [processors.execd](/plugins/processors/execd) or [outputs.execd](/plugins/outputs/execd) + 1. Submit your plugin by opening a PR to add your external plugin to the [/EXTERNAL_PLUGINS.md](/EXTERNAL_PLUGINS.md) + list. Please include the plugin name, link to the plugin repository and a short description of the plugin. From 317ee71c325385872a475eac9330d336f13a6378 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Sep 2021 12:38:59 -0500 Subject: [PATCH 043/176] fix: bump github.com/Azure/go-autorest/autorest/adal from 0.9.10->0.9.15 --- docs/LICENSE_OF_DEPENDENCIES.md | 2 +- go.mod | 4 ++-- go.sum | 6 ++++-- plugins/outputs/azure_monitor/README.md | 6 +++--- plugins/outputs/azure_monitor/azure_monitor_test.go | 8 +++++++- 5 files changed, 17 insertions(+), 9 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 46f8e5ff32793..c50b0ea3f3f9f 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -85,7 +85,7 @@ following works: - github.com/gofrs/uuid [MIT License](https://github.com/gofrs/uuid/blob/master/LICENSE) - github.com/gogo/googleapis [Apache License 2.0](https://github.com/gogo/googleapis/blob/master/LICENSE) - github.com/gogo/protobuf [BSD 3-Clause Clear License](https://github.com/gogo/protobuf/blob/master/LICENSE) -- github.com/golang-jwt/jwt [MIT License](https://github.com/golang-jwt/jwt/blob/master/LICENSE) +- github.com/golang-jwt/jwt [MIT License](https://github.com/golang-jwt/jwt/blob/main/LICENSE) - github.com/golang-sql/civil [Apache License 2.0](https://github.com/golang-sql/civil/blob/master/LICENSE) - github.com/golang/geo [Apache 
License 2.0](https://github.com/golang/geo/blob/master/LICENSE) - github.com/golang/groupcache [Apache License 2.0](https://github.com/golang/groupcache/blob/master/LICENSE) diff --git a/go.mod b/go.mod index c133b72dda361..54d8eea5e8782 100644 --- a/go.mod +++ b/go.mod @@ -19,13 +19,13 @@ require ( github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest v0.11.17 - github.com/Azure/go-autorest/autorest/adal v0.9.10 + github.com/Azure/go-autorest/autorest/adal v0.9.15 github.com/Azure/go-autorest/autorest/azure/auth v0.5.6 github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/autorest/to v0.3.0 // indirect github.com/Azure/go-autorest/autorest/validation v0.2.0 // indirect - github.com/Azure/go-autorest/logger v0.2.0 // indirect + github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect github.com/BurntSushi/toml v0.3.1 github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee diff --git a/go.sum b/go.sum index 01266f3e9cbf7..1cc02dab3cf01 100644 --- a/go.sum +++ b/go.sum @@ -98,8 +98,9 @@ github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMl github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/adal v0.9.10 h1:r6fZHMaHD8B6LDCn0o5vyBFHIHrM6Ywwx7mb49lPItI= github.com/Azure/go-autorest/autorest/adal v0.9.10/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/adal v0.9.15 h1:X+p2GF0GWyOiSmqohIaEeuNFNDY4I4EOlVuUQvFdWMk= 
+github.com/Azure/go-autorest/autorest/adal v0.9.15/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= github.com/Azure/go-autorest/autorest/azure/auth v0.5.6 h1:cgiBtUxatlt/e3qY6fQJioqbocWHr5osz259MomF5M0= github.com/Azure/go-autorest/autorest/azure/auth v0.5.6/go.mod h1:nYlP+G+n8MhD5CjIi6W8nFTIJn/PnTHes5nUbK6BxD0= @@ -121,8 +122,9 @@ github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsI github.com/Azure/go-autorest/autorest/validation v0.2.0 h1:15vMO4y76dehZSq7pAaOLQxC6dZYsSrj2GQpflyM/L4= github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= diff --git a/plugins/outputs/azure_monitor/README.md b/plugins/outputs/azure_monitor/README.md index 6f2abb97ec3ed..9d835c1eb6f4b 100644 --- a/plugins/outputs/azure_monitor/README.md +++ b/plugins/outputs/azure_monitor/README.md @@ -40,7 +40,7 @@ written as a dimension on each Azure Monitor metric. ## The Azure Resource ID against which metric will be logged, e.g. 
## ex: resource_id = "/subscriptions//resourceGroups//providers/Microsoft.Compute/virtualMachines/" # resource_id = "" - + ## Optionally, if in Azure US Government, China, or other sovereign ## cloud environment, set the appropriate REST endpoint for receiving ## metrics. (Note: region may be unused in this context) @@ -76,7 +76,7 @@ preferred authentication methods are different from the *order* in which each authentication is checked. Here are the preferred authentication methods: 1. Managed Service Identity (MSI) token - - This is the prefered authentication method. Telegraf will automatically + - This is the preferred authentication method. Telegraf will automatically authenticate using this method when running on Azure VMs. 2. AAD Application Tokens (Service Principals) - Primarily useful if Telegraf is writing metrics for other resources. @@ -132,7 +132,7 @@ authenticate when running Telegraf on Azure VMs. Azure Monitor only accepts values with a numeric type. The plugin will drop fields with a string type by default. The plugin can set all string type fields as extra dimensions in the Azure Monitor custom metric by setting the -configuration option `strings_as_dimensions` to `true`. +configuration option `strings_as_dimensions` to `true`. Keep in mind, Azure Monitor allows a maximum of 10 dimensions per metric. 
The plugin will deterministically dropped any dimensions that exceed the 10 diff --git a/plugins/outputs/azure_monitor/azure_monitor_test.go b/plugins/outputs/azure_monitor/azure_monitor_test.go index c702f46b0e0b5..803b0441af207 100644 --- a/plugins/outputs/azure_monitor/azure_monitor_test.go +++ b/plugins/outputs/azure_monitor/azure_monitor_test.go @@ -6,10 +6,12 @@ import ( "encoding/json" "net/http" "net/http/httptest" + "os" "testing" "time" "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" @@ -209,7 +211,11 @@ func TestAggregate(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := tt.plugin.Connect() + msiEndpoint, err := adal.GetMSIVMEndpoint() + require.NoError(t, err) + + os.Setenv("MSI_ENDPOINT", msiEndpoint) + err = tt.plugin.Connect() require.NoError(t, err) // Reset globals From 3c27f598bb909adf6d12ba9cbd6a135d263e9615 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Tue, 14 Sep 2021 15:31:44 +0200 Subject: [PATCH 044/176] fix: Update gopcua library to latest version (#9560) --- go.mod | 2 +- go.sum | 4 ++-- plugins/inputs/opcua/opcua_client.go | 8 +++++--- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 54d8eea5e8782..a05a8f5faeeaa 100644 --- a/go.mod +++ b/go.mod @@ -121,7 +121,7 @@ require ( github.com/google/uuid v1.2.0 // indirect github.com/googleapis/gax-go/v2 v2.0.5 // indirect github.com/googleapis/gnostic v0.4.1 // indirect - github.com/gopcua/opcua v0.1.13 + github.com/gopcua/opcua v0.2.0-rc2.0.20210409063412-baabb9b14fd2 github.com/gorilla/mux v1.7.3 github.com/gorilla/websocket v1.4.2 github.com/gosnmp/gosnmp v1.32.0 diff --git a/go.sum b/go.sum index 1cc02dab3cf01..b8002a55b2b33 100644 --- a/go.sum +++ b/go.sum @@ -819,8 +819,8 @@ github.com/googleapis/gnostic v0.1.0/go.mod 
h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTV github.com/googleapis/gnostic v0.4.0/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/gopcua/opcua v0.1.13 h1:UP746MKRFNbv+CQGfrPwgH7rGxOlSGzVu9ieZdcox4E= -github.com/gopcua/opcua v0.1.13/go.mod h1:a6QH4F9XeODklCmWuvaOdL8v9H0d73CEKUHWVZLQyE8= +github.com/gopcua/opcua v0.2.0-rc2.0.20210409063412-baabb9b14fd2 h1:OtFKr0Kwe1oLpMR+uNMh/DPgC5fxAq4xRe6HBv8LDqQ= +github.com/gopcua/opcua v0.2.0-rc2.0.20210409063412-baabb9b14fd2/go.mod h1:a6QH4F9XeODklCmWuvaOdL8v9H0d73CEKUHWVZLQyE8= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gophercloud/gophercloud v0.12.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= diff --git a/plugins/inputs/opcua/opcua_client.go b/plugins/inputs/opcua/opcua_client.go index 53454ba8816a7..8dec41eb343e3 100644 --- a/plugins/inputs/opcua/opcua_client.go +++ b/plugins/inputs/opcua/opcua_client.go @@ -406,10 +406,10 @@ func Connect(o *OpcUA) error { o.state = Connecting if o.client != nil { - if err := o.client.CloseSession(); err != nil { + if err := o.client.Close(); err != nil { // Only log the error but to not bail-out here as this prevents // reconnections for multiple parties (see e.g. #9523). 
- o.Log.Errorf("Closing session failed: %v", err) + o.Log.Errorf("Closing connection failed: %v", err) } } @@ -445,8 +445,10 @@ func Connect(o *OpcUA) error { } func (o *OpcUA) setupOptions() error { + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(o.ConnectTimeout)) + defer cancel() // Get a list of the endpoints for our target server - endpoints, err := opcua.GetEndpoints(o.Endpoint) + endpoints, err := opcua.GetEndpoints(ctx, o.Endpoint) if err != nil { return err } From a3454be2d884dce2b1a2eabc019c53a7df2a6ffa Mon Sep 17 00:00:00 2001 From: Jacob Marble Date: Tue, 14 Sep 2021 11:07:28 -0700 Subject: [PATCH 045/176] chore: update influxdb-observability for OpenTelemetry plugins (#9718) --- go.mod | 10 +++++----- go.sum | 19 ++++++++++--------- .../outputs/opentelemetry/opentelemetry.go | 2 ++ .../opentelemetry/opentelemetry_test.go | 4 ++-- 4 files changed, 19 insertions(+), 16 deletions(-) diff --git a/go.mod b/go.mod index a05a8f5faeeaa..1558135cd9a3f 100644 --- a/go.mod +++ b/go.mod @@ -140,9 +140,9 @@ require ( github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/hashicorp/serf v0.9.5 // indirect github.com/influxdata/go-syslog/v3 v3.0.0 - github.com/influxdata/influxdb-observability/common v0.2.4 - github.com/influxdata/influxdb-observability/influx2otel v0.2.4 - github.com/influxdata/influxdb-observability/otel2influx v0.2.4 + github.com/influxdata/influxdb-observability/common v0.2.7 + github.com/influxdata/influxdb-observability/influx2otel v0.2.7 + github.com/influxdata/influxdb-observability/otel2influx v0.2.7 github.com/influxdata/tail v1.0.1-0.20210707231403-b283181d1fa7 github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 @@ -269,7 +269,7 @@ require ( go.etcd.io/etcd/api/v3 v3.5.0 // indirect go.mongodb.org/mongo-driver v1.5.3 go.opencensus.io v0.23.0 // indirect - go.opentelemetry.io/collector/model v0.0.0-20210723184018-3b7d6ce4830c + 
go.opentelemetry.io/collector/model v0.35.0 go.starlark.net v0.0.0-20210406145628-7a1108eaa012 go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect @@ -290,7 +290,7 @@ require ( google.golang.org/api v0.54.0 google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20210813162853-db860fec028c - google.golang.org/grpc v1.39.1 + google.golang.org/grpc v1.40.0 google.golang.org/protobuf v1.27.1 gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect gopkg.in/djherbis/times.v1 v1.2.0 diff --git a/go.sum b/go.sum index b8002a55b2b33..a0e2e9e2bdd7d 100644 --- a/go.sum +++ b/go.sum @@ -924,12 +924,12 @@ github.com/influxdata/flux v0.65.0/go.mod h1:BwN2XG2lMszOoquQaFdPET8FRQfrXiZsWmc github.com/influxdata/go-syslog/v3 v3.0.0 h1:jichmjSZlYK0VMmlz+k4WeOQd7z745YLsvGMqwtYt4I= github.com/influxdata/go-syslog/v3 v3.0.0/go.mod h1:tulsOp+CecTAYC27u9miMgq21GqXRW6VdKbOG+QSP4Q= github.com/influxdata/influxdb v1.8.2/go.mod h1:SIzcnsjaHRFpmlxpJ4S3NT64qtEKYweNTUMb/vh0OMQ= -github.com/influxdata/influxdb-observability/common v0.2.4 h1:GMycMMB0IMLzStLVgWIRJ4UFP5x5JOfITTRryL1dpgQ= -github.com/influxdata/influxdb-observability/common v0.2.4/go.mod h1:xbwEYfQLQIHnmcLQL8vniaZ1aEHI5D0K5Y6afiV5Wmo= -github.com/influxdata/influxdb-observability/influx2otel v0.2.4 h1:23qw/xv9ke6LIYo0/pNLhiS9bqlrkx2YiU3SNUKLxts= -github.com/influxdata/influxdb-observability/influx2otel v0.2.4/go.mod h1:WnBBHlTEB/orMD3io5TX8EZEnKryNviUbdlLhWwcqo0= -github.com/influxdata/influxdb-observability/otel2influx v0.2.4 h1:wDLEz/JxGXRJdmU9wT7YwslEaU6la27/Qs4f3a9VPhI= -github.com/influxdata/influxdb-observability/otel2influx v0.2.4/go.mod h1:HniEElFGVVs0KgHCjU/iIv6PFFvpicaLKd72PlCqn1o= +github.com/influxdata/influxdb-observability/common v0.2.7 h1:C+oDh8Kbw+Ykx9yog/uJXL27rwMN3hgTLQfAFg1eQO0= +github.com/influxdata/influxdb-observability/common v0.2.7/go.mod h1:+8VMGrfWZnXjc1c/oP+N4O/sHoneWgN3ojAHwgYgV4A= +github.com/influxdata/influxdb-observability/influx2otel 
v0.2.7 h1:YIXH+qNQgAtTA5U3s/wxDxxh5Vz+ylhZhyuRxtfTBqs= +github.com/influxdata/influxdb-observability/influx2otel v0.2.7/go.mod h1:ASyDMoPChvIgbEOvghwc5NxngOgXThp9MFKs7efNLtQ= +github.com/influxdata/influxdb-observability/otel2influx v0.2.7 h1:FACov3tcGCKfEGXsyUbgUOQx3zXffXaCFbN3ntAzh1E= +github.com/influxdata/influxdb-observability/otel2influx v0.2.7/go.mod h1:tE3OSy4RyAHIjxYlFZBsWorEM3aqaUeqSx3mbacm8KI= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxql v1.1.0/go.mod h1:KpVI7okXjK6PRi3Z5B+mtKZli+R1DnZgb3N+tzevNgo= github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= @@ -1616,8 +1616,8 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector/model v0.0.0-20210723184018-3b7d6ce4830c h1:3s2a2cav7u4W1b0cOYxmlj1y1NcVuDZwgUaAQ6wfImo= -go.opentelemetry.io/collector/model v0.0.0-20210723184018-3b7d6ce4830c/go.mod h1:PcHNnM+RUl0uD8VkSn93PO78N7kQYhfqpI/eki57pl4= +go.opentelemetry.io/collector/model v0.35.0 h1:NpKjghiqlei4ecwjOYOMhD6tj4gY8yiWHPJmbFs/ArI= +go.opentelemetry.io/collector/model v0.35.0/go.mod h1:+7YCSjJG+MqiIFjauzt7oM2qkqBsaJWh5hcsO4fwsAc= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.starlark.net v0.0.0-20210406145628-7a1108eaa012 h1:4RGobP/iq7S22H0Bb92OEt+M8/cfBQnW+T+a2MC0sQo= go.starlark.net v0.0.0-20210406145628-7a1108eaa012/go.mod h1:t3mmBBPzAVvK0L0n1drDmrQsJ8FoIx4INCqVMTr/Zo0= @@ -2190,8 +2190,9 @@ google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.38.0/go.mod 
h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1 h1:f37vZbBVTiJ6jKG5mWz8ySOBxNqy6ViPgyhSdVnxF3E= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/plugins/outputs/opentelemetry/opentelemetry.go b/plugins/outputs/opentelemetry/opentelemetry.go index 874eaba819418..e1bbc9322e759 100644 --- a/plugins/outputs/opentelemetry/opentelemetry.go +++ b/plugins/outputs/opentelemetry/opentelemetry.go @@ -13,6 +13,8 @@ import ( "go.opentelemetry.io/collector/model/otlpgrpc" "google.golang.org/grpc" "google.golang.org/grpc/credentials" + // This causes the gRPC library to register gzip compression. 
+ _ "google.golang.org/grpc/encoding/gzip" "google.golang.org/grpc/metadata" ) diff --git a/plugins/outputs/opentelemetry/opentelemetry_test.go b/plugins/outputs/opentelemetry/opentelemetry_test.go index 4ba3adbbb07d0..6ebf1829bd540 100644 --- a/plugins/outputs/opentelemetry/opentelemetry_test.go +++ b/plugins/outputs/opentelemetry/opentelemetry_test.go @@ -33,9 +33,9 @@ func TestOpenTelemetry(t *testing.T) { m.SetName("cpu_temp") m.SetDataType(pdata.MetricDataTypeGauge) dp := m.Gauge().DataPoints().AppendEmpty() - dp.LabelsMap().Insert("foo", "bar") + dp.Attributes().InsertString("foo", "bar") dp.SetTimestamp(pdata.Timestamp(1622848686000000000)) - dp.SetValue(87.332) + dp.SetDoubleVal(87.332) } m := newMockOtelService(t) t.Cleanup(m.Cleanup) From cfd50de57c20f499a5b964f95374466951e3bcbe Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 14 Sep 2021 13:56:49 -0700 Subject: [PATCH 046/176] fix(inputs.tail): change test default watch method to poll when Win --- plugins/inputs/tail/tail_test.go | 107 +++++++++++++++---------------- 1 file changed, 52 insertions(+), 55 deletions(-) diff --git a/plugins/inputs/tail/tail_test.go b/plugins/inputs/tail/tail_test.go index b855691e6f1ab..16c38519a83b6 100644 --- a/plugins/inputs/tail/tail_test.go +++ b/plugins/inputs/tail/tail_test.go @@ -450,89 +450,86 @@ func TestCharacterEncoding(t *testing.T) { ), } + watchMethod := defaultWatchMethod + if runtime.GOOS == "windows" { + watchMethod = "poll" + } + tests := []struct { - name string - plugin *Tail - offset int64 - expected []telegraf.Metric + name string + testfiles string + fromBeginning bool + characterEncoding string + offset int64 + expected []telegraf.Metric }{ { - name: "utf-8", - plugin: &Tail{ - Files: []string{filepath.Join(testdataDir, "cpu-utf-8.influx")}, - FromBeginning: true, - MaxUndeliveredLines: 1000, - Log: testutil.Logger{}, - CharacterEncoding: "utf-8", - }, - expected: full, + name: "utf-8", + 
testfiles: "cpu-utf-8.influx", + fromBeginning: true, + characterEncoding: "utf-8", + expected: full, }, { - name: "utf-8 seek", - plugin: &Tail{ - Files: []string{filepath.Join(testdataDir, "cpu-utf-8.influx")}, - MaxUndeliveredLines: 1000, - Log: testutil.Logger{}, - CharacterEncoding: "utf-8", - }, - offset: 0x33, - expected: full[1:], + name: "utf-8 seek", + testfiles: "cpu-utf-8.influx", + characterEncoding: "utf-8", + offset: 0x33, + expected: full[1:], }, { - name: "utf-16le", - plugin: &Tail{ - Files: []string{filepath.Join(testdataDir, "cpu-utf-16le.influx")}, - FromBeginning: true, - MaxUndeliveredLines: 1000, - Log: testutil.Logger{}, - CharacterEncoding: "utf-16le", - }, - expected: full, + name: "utf-16le", + testfiles: "cpu-utf-16le.influx", + fromBeginning: true, + characterEncoding: "utf-16le", + expected: full, }, { - name: "utf-16le seek", - plugin: &Tail{ - Files: []string{filepath.Join(testdataDir, "cpu-utf-16le.influx")}, - MaxUndeliveredLines: 1000, - Log: testutil.Logger{}, - CharacterEncoding: "utf-16le", - }, - offset: 0x68, - expected: full[1:], + name: "utf-16le seek", + testfiles: "cpu-utf-16le.influx", + characterEncoding: "utf-16le", + offset: 0x68, + expected: full[1:], }, { - name: "utf-16be", - plugin: &Tail{ - Files: []string{filepath.Join(testdataDir, "cpu-utf-16be.influx")}, - FromBeginning: true, - MaxUndeliveredLines: 1000, - Log: testutil.Logger{}, - CharacterEncoding: "utf-16be", - }, - expected: full, + name: "utf-16be", + testfiles: "cpu-utf-16be.influx", + fromBeginning: true, + characterEncoding: "utf-16be", + expected: full, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - tt.plugin.SetParserFunc(func() (parsers.Parser, error) { + + plugin := &Tail{ + Files: []string{filepath.Join(testdataDir, tt.testfiles)}, + FromBeginning: tt.fromBeginning, + MaxUndeliveredLines: 1000, + Log: testutil.Logger{}, + CharacterEncoding: tt.characterEncoding, + WatchMethod: watchMethod, + } + + 
plugin.SetParserFunc(func() (parsers.Parser, error) { handler := influx.NewMetricHandler() return influx.NewParser(handler), nil }) if tt.offset != 0 { - tt.plugin.offsets = map[string]int64{ - tt.plugin.Files[0]: tt.offset, + plugin.offsets = map[string]int64{ + plugin.Files[0]: tt.offset, } } - err := tt.plugin.Init() + err := plugin.Init() require.NoError(t, err) var acc testutil.Accumulator - err = tt.plugin.Start(&acc) + err = plugin.Start(&acc) require.NoError(t, err) acc.Wait(len(tt.expected)) - tt.plugin.Stop() + plugin.Stop() actual := acc.GetTelegrafMetrics() for _, m := range actual { From 357959f0876985c3b2e19c9fec19fb7d26b1c734 Mon Sep 17 00:00:00 2001 From: Goutham Veeramachaneni Date: Tue, 14 Sep 2021 23:04:34 +0200 Subject: [PATCH 047/176] fix: Add error message logging to outputs.http (#9727) --- plugins/outputs/http/http.go | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/plugins/outputs/http/http.go b/plugins/outputs/http/http.go index 83faef0dae241..edaae3f6ec07d 100644 --- a/plugins/outputs/http/http.go +++ b/plugins/outputs/http/http.go @@ -1,6 +1,7 @@ package http import ( + "bufio" "bytes" "context" "fmt" @@ -18,7 +19,8 @@ import ( ) const ( - defaultURL = "http://127.0.0.1:8080/telegraf" + maxErrMsgLen = 1024 + defaultURL = "http://127.0.0.1:8080/telegraf" ) var sampleConfig = ` @@ -182,11 +184,18 @@ func (h *HTTP) write(reqBody []byte) error { return err } defer resp.Body.Close() - _, err = ioutil.ReadAll(resp.Body) if resp.StatusCode < 200 || resp.StatusCode >= 300 { - return fmt.Errorf("when writing to [%s] received status code: %d", h.URL, resp.StatusCode) + errorLine := "" + scanner := bufio.NewScanner(io.LimitReader(resp.Body, maxErrMsgLen)) + if scanner.Scan() { + errorLine = scanner.Text() + } + + return fmt.Errorf("when writing to [%s] received status code: %d. 
body: %s", h.URL, resp.StatusCode, errorLine) } + + _, err = ioutil.ReadAll(resp.Body) if err != nil { return fmt.Errorf("when writing to [%s] received error: %v", h.URL, err) } From 646273abe0c61bd9d9bb7ca04deadfe698c03f73 Mon Sep 17 00:00:00 2001 From: Doron-Bargo <62555360+Doron-Bargo@users.noreply.github.com> Date: Wed, 15 Sep 2021 00:06:11 +0300 Subject: [PATCH 048/176] fix: pagination error on cloudwatch plugin (#9693) --- plugins/inputs/cloudwatch/cloudwatch.go | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go index 1cd7958301611..7dbd3c7faa7be 100644 --- a/plugins/inputs/cloudwatch/cloudwatch.go +++ b/plugins/inputs/cloudwatch/cloudwatch.go @@ -410,18 +410,21 @@ func (c *CloudWatch) fetchNamespaceMetrics() ([]*cwClient.Metric, error) { default: recentlyActive = nil } - params = &cwClient.ListMetricsInput{ - Dimensions: []*cwClient.DimensionFilter{}, - NextToken: token, - MetricName: nil, - RecentlyActive: recentlyActive, - } + for _, namespace := range c.Namespaces { - params.Namespace = aws.String(namespace) + + params = &cwClient.ListMetricsInput{ + Dimensions: []*cwClient.DimensionFilter{}, + NextToken: token, + MetricName: nil, + RecentlyActive: recentlyActive, + Namespace: aws.String(namespace), + } + for { resp, err := c.client.ListMetrics(params) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to list metrics with params per namespace: %v", err) } metrics = append(metrics, resp.Metrics...) 
@@ -432,7 +435,6 @@ func (c *CloudWatch) fetchNamespaceMetrics() ([]*cwClient.Metric, error) { params.NextToken = resp.NextToken } } - return metrics, nil } From 40fa10ba0b66cc941ed202a23c5fb952da06aeee Mon Sep 17 00:00:00 2001 From: Sanyam Arya Date: Tue, 14 Sep 2021 23:26:49 +0200 Subject: [PATCH 049/176] feat: Internet Speed Monitor Input Plugin (#9623) --- docs/LICENSE_OF_DEPENDENCIES.md | 1 + go.mod | 3 +- go.sum | 5 +- plugins/inputs/all/all.go | 1 + plugins/inputs/internet_speed/README.md | 30 +++++++ .../inputs/internet_speed/internet_speed.go | 82 +++++++++++++++++++ .../internet_speed/internet_speed_test.go | 44 ++++++++++ 7 files changed, 164 insertions(+), 2 deletions(-) create mode 100644 plugins/inputs/internet_speed/README.md create mode 100644 plugins/inputs/internet_speed/internet_speed.go create mode 100644 plugins/inputs/internet_speed/internet_speed_test.go diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index c50b0ea3f3f9f..b36594a1faea2 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -193,6 +193,7 @@ following works: - github.com/safchain/ethtool [Apache License 2.0](https://github.com/safchain/ethtool/blob/master/LICENSE) - github.com/samuel/go-zookeeper [BSD 3-Clause Clear License](https://github.com/samuel/go-zookeeper/blob/master/LICENSE) - github.com/shirou/gopsutil [BSD 3-Clause Clear License](https://github.com/shirou/gopsutil/blob/master/LICENSE) +- github.com/showwin/speedtest-go [MIT License](https://github.com/showwin/speedtest-go/blob/master/LICENSE) - github.com/signalfx/com_signalfx_metrics_protobuf [Apache License 2.0](https://github.com/signalfx/com_signalfx_metrics_protobuf/blob/master/LICENSE) - github.com/signalfx/gohistogram [MIT License](https://github.com/signalfx/gohistogram/blob/master/LICENSE) - github.com/signalfx/golib [Apache License 2.0](https://github.com/signalfx/golib/blob/master/LICENSE) diff --git a/go.mod b/go.mod index 
1558135cd9a3f..f115d32594712 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,7 @@ require ( github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46 // indirect github.com/aerospike/aerospike-client-go v1.27.0 github.com/alecthomas/participle v0.4.1 // indirect - github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d + github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15 github.com/aliyun/alibaba-cloud-sdk-go v1.61.1004 github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 github.com/antchfx/jsonquery v1.1.4 @@ -233,6 +233,7 @@ require ( github.com/sensu/sensu-go/api/core/v2 v2.9.0 github.com/shirou/gopsutil v3.21.6-0.20210624221800-cb512c850043+incompatible github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 // indirect + github.com/showwin/speedtest-go v1.1.4 github.com/signalfx/com_signalfx_metrics_protobuf v0.0.2 // indirect github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083 // indirect github.com/signalfx/golib/v3 v3.3.34 diff --git a/go.sum b/go.sum index a0e2e9e2bdd7d..1d2611560b766 100644 --- a/go.sum +++ b/go.sum @@ -202,8 +202,9 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15 h1:AUNCr9CiJuwrRYS3XieqF+Z9B9gNxo/eANAJCF2eiN4= +github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15/go.mod 
h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= github.com/aliyun/alibaba-cloud-sdk-go v1.61.1004 h1:YtaYjXmemIMyySUbs0VGFPqsLpsNHf4TW/L6yqpJQ9s= github.com/aliyun/alibaba-cloud-sdk-go v1.61.1004/go.mod h1:pUKYbK5JQ+1Dfxk80P0qxGqe5dkxDoabbZS7zOcouyA= @@ -1434,6 +1435,8 @@ github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 h1:Pm6R878vxWWWR+Sa3ppsLce/Zq+JNTs6aVvRu13jv9A= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/showwin/speedtest-go v1.1.4 h1:pcY1W5LYZu44lH6Fuu80nu/Pj67n//VArlZudbAgR6E= +github.com/showwin/speedtest-go v1.1.4/go.mod h1:dJugxvC/AQDt4HQQKZ9lKNa2+b1c8nzj9IL0a/F8l1U= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 781e04e60928b..60a52903ef079 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -74,6 +74,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/intel_powerstat" _ "github.com/influxdata/telegraf/plugins/inputs/intel_rdt" _ "github.com/influxdata/telegraf/plugins/inputs/internal" + _ "github.com/influxdata/telegraf/plugins/inputs/internet_speed" _ "github.com/influxdata/telegraf/plugins/inputs/interrupts" _ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor" _ "github.com/influxdata/telegraf/plugins/inputs/ipset" diff --git 
a/plugins/inputs/internet_speed/README.md b/plugins/inputs/internet_speed/README.md new file mode 100644 index 0000000000000..f9a71446f4979 --- /dev/null +++ b/plugins/inputs/internet_speed/README.md @@ -0,0 +1,30 @@ +# Internet Speed Monitor + +The `Internet Speed Monitor` collects data about the internet speed on the system. + +## Configuration + +```toml +# Monitors internet speed in the network +[[inputs.internet_speed]] + ## Sets if runs file download test + ## Default: false + enable_file_download = false +``` + +## Metrics + +It collects latency, download speed and upload speed + + +| Name | filed name | type | Unit | +| -------------- | ---------- | ------- | ---- | +| Download Speed | download | float64 | Mbps | +| Upload Speed | upload | float64 | Mbps | +| Latency | latency | float64 | ms | + +## Example Output + +```sh +internet_speed,host=Sanyam-Ubuntu download=41.791,latency=28.518,upload=59.798 1631031183000000000 +``` \ No newline at end of file diff --git a/plugins/inputs/internet_speed/internet_speed.go b/plugins/inputs/internet_speed/internet_speed.go new file mode 100644 index 0000000000000..cf0c5cfb13117 --- /dev/null +++ b/plugins/inputs/internet_speed/internet_speed.go @@ -0,0 +1,82 @@ +package internet_speed + +import ( + "fmt" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/showwin/speedtest-go/speedtest" +) + +// InternetSpeed is used to store configuration values. +type InternetSpeed struct { + EnableFileDownload bool `toml:"enable_file_download"` + Log telegraf.Logger `toml:"-"` +} + +const sampleConfig = ` + ## Sets if runs file download test + ## Default: false + enable_file_download = false +` + +// Description returns information about the plugin. +func (is *InternetSpeed) Description() string { + return "Monitors internet speed using speedtest.net service" +} + +// SampleConfig displays configuration instructions. 
+func (is *InternetSpeed) SampleConfig() string { + return sampleConfig +} + +const measurement = "internet_speed" + +func (is *InternetSpeed) Gather(acc telegraf.Accumulator) error { + user, err := speedtest.FetchUserInfo() + if err != nil { + return fmt.Errorf("fetching user info failed: %v", err) + } + serverList, err := speedtest.FetchServerList(user) + if err != nil { + return fmt.Errorf("fetching server list failed: %v", err) + } + + if len(serverList.Servers) < 1 { + return fmt.Errorf("no servers found") + } + s := serverList.Servers[0] + is.Log.Debug("Starting Speed Test") + is.Log.Debug("Running Ping...") + err = s.PingTest() + if err != nil { + return fmt.Errorf("ping test failed: %v", err) + } + is.Log.Debug("Running Download...") + err = s.DownloadTest(is.EnableFileDownload) + if err != nil { + return fmt.Errorf("download test failed: %v", err) + } + is.Log.Debug("Running Upload...") + err = s.UploadTest(is.EnableFileDownload) + if err != nil { + return fmt.Errorf("upload test failed failed: %v", err) + } + + is.Log.Debug("Test finished.") + + fields := make(map[string]interface{}) + fields["download"] = s.DLSpeed + fields["upload"] = s.ULSpeed + fields["latency"] = s.Latency + + tags := make(map[string]string) + + acc.AddFields(measurement, fields, tags) + return nil +} +func init() { + inputs.Add("internet_speed", func() telegraf.Input { + return &InternetSpeed{} + }) +} diff --git a/plugins/inputs/internet_speed/internet_speed_test.go b/plugins/inputs/internet_speed/internet_speed_test.go new file mode 100644 index 0000000000000..669426ff683ad --- /dev/null +++ b/plugins/inputs/internet_speed/internet_speed_test.go @@ -0,0 +1,44 @@ +package internet_speed + +import ( + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestGathering(t *testing.T) { + if testing.Short() { + t.Skip("Skipping network-dependent test in short mode.") + } + internetSpeed := &InternetSpeed{ + EnableFileDownload: false, 
+ Log: testutil.Logger{}, + } + + acc := &testutil.Accumulator{} + + require.NoError(t, internetSpeed.Gather(acc)) +} + +func TestDataGen(t *testing.T) { + if testing.Short() { + t.Skip("Skipping network-dependent test in short mode.") + } + internetSpeed := &InternetSpeed{ + EnableFileDownload: false, + Log: testutil.Logger{}, + } + + acc := &testutil.Accumulator{} + require.NoError(t, internetSpeed.Gather(acc)) + + metric, ok := acc.Get("internet_speed") + require.True(t, ok) + + tags := metric.Tags + + fields := metric.Fields + + acc.AssertContainsTaggedFields(t, "internet_speed", fields, tags) +} From 96773387ae1a57068367041c2e57c4b05030f114 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 14 Sep 2021 22:36:14 +0100 Subject: [PATCH 050/176] fix: bump github.com/prometheus/client_golang from 1.7.1 to 1.11.0 (#9653) --- go.mod | 6 +++--- go.sum | 12 ++++++++---- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index f115d32594712..5981bc49d62b1 100644 --- a/go.mod +++ b/go.mod @@ -160,7 +160,7 @@ require ( github.com/jhump/protoreflect v1.8.3-0.20210616212123-6cc1efa697ca github.com/jmespath/go-jmespath v0.4.0 github.com/jpillora/backoff v1.0.0 // indirect - github.com/json-iterator/go v1.1.10 // indirect + github.com/json-iterator/go v1.1.11 // indirect github.com/jstemmer/go-junit-report v0.9.1 // indirect github.com/kardianos/service v1.0.0 github.com/karrick/godirwalk v1.16.1 @@ -217,9 +217,9 @@ require ( github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4 // indirect github.com/pkg/errors v0.9.1 github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.7.1 + github.com/prometheus/client_golang v1.11.0 github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.15.0 + github.com/prometheus/common v0.26.0 github.com/prometheus/procfs v0.6.0 github.com/prometheus/prometheus v1.8.2-0.20200911110723-e83ef207b6c2 
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect diff --git a/go.sum b/go.sum index 1d2611560b766..2622f28fff755 100644 --- a/go.sum +++ b/go.sum @@ -581,6 +581,7 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= @@ -1018,8 +1019,9 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= @@ -1353,8 +1355,9 @@ github.com/prometheus/client_golang v1.1.0/go.mod 
h1:I1FGZT9+L76gKKOs5djB6ezCbFQ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= -github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -1371,8 +1374,8 @@ github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt2 github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.13.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/common v0.15.0 h1:4fgOnadei3EZvgRwxJ7RMpG1k1pOZth5Pc13tyspaKM= -github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs 
v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1932,6 +1935,7 @@ golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210611083646-a4fc73990273/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= From c331669f2eab1bba58db8c16b21e8577910d604f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 14 Sep 2021 22:36:58 +0100 Subject: [PATCH 051/176] fix: bump github.com/Azure/azure-event-hubs-go/v3 from 3.2.0 to 3.3.13 (#9677) --- go.mod | 14 +++++++------- go.sum | 30 +++++++++++++++++++----------- 2 files changed, 26 insertions(+), 18 deletions(-) diff --git a/go.mod b/go.mod index 5981bc49d62b1..4dcb4550d4764 100644 --- a/go.mod +++ b/go.mod @@ -8,23 +8,23 @@ require ( cloud.google.com/go/pubsub v1.15.0 code.cloudfoundry.org/clock v1.0.0 // indirect collectd.org v0.5.0 - github.com/Azure/azure-amqp-common-go/v3 v3.0.0 // indirect - github.com/Azure/azure-event-hubs-go/v3 v3.2.0 + github.com/Azure/azure-amqp-common-go/v3 v3.0.1 // indirect + github.com/Azure/azure-event-hubs-go/v3 v3.3.13 github.com/Azure/azure-kusto-go v0.3.2 github.com/Azure/azure-pipeline-go 
v0.2.3 // indirect - github.com/Azure/azure-sdk-for-go v45.1.0+incompatible // indirect + github.com/Azure/azure-sdk-for-go v51.1.0+incompatible // indirect github.com/Azure/azure-storage-blob-go v0.13.0 // indirect github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd - github.com/Azure/go-amqp v0.12.6 // indirect + github.com/Azure/go-amqp v0.13.12 // indirect github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect - github.com/Azure/go-autorest/autorest v0.11.17 + github.com/Azure/go-autorest/autorest v0.11.18 github.com/Azure/go-autorest/autorest/adal v0.9.15 github.com/Azure/go-autorest/autorest/azure/auth v0.5.6 github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect - github.com/Azure/go-autorest/autorest/to v0.3.0 // indirect - github.com/Azure/go-autorest/autorest/validation v0.2.0 // indirect + github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect + github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect github.com/BurntSushi/toml v0.3.1 diff --git a/go.sum b/go.sum index 2622f28fff755..c5a0778443420 100644 --- a/go.sum +++ b/go.sum @@ -55,10 +55,10 @@ collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= collectd.org v0.5.0 h1:y4uFSAuOmeVhG3GCRa3/oH+ysePfO/+eGJNfd0Qa3d8= collectd.org v0.5.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-amqp-common-go/v3 v3.0.0 h1:j9tjcwhypb/jek3raNrwlCIl7iKQYOug7CLpSyBBodc= -github.com/Azure/azure-amqp-common-go/v3 v3.0.0/go.mod h1:SY08giD/XbhTz07tJdpw1SoxQXHPN30+DI3Z04SYqyg= -github.com/Azure/azure-event-hubs-go/v3 v3.2.0 
h1:CQlxKH5a4NX1ZmbdqXUPRwuNGh2XvtgmhkZvkEuWzhs= -github.com/Azure/azure-event-hubs-go/v3 v3.2.0/go.mod h1:BPIIJNH/l/fVHYq3Rm6eg4clbrULrQ3q7+icmqHyyLc= +github.com/Azure/azure-amqp-common-go/v3 v3.0.1 h1:mXh+eyOxGLBfqDtfmbtby0l7XfG/6b2NkuZ3B7i6zHA= +github.com/Azure/azure-amqp-common-go/v3 v3.0.1/go.mod h1:PBIGdzcO1teYoufTKMcGibdKaYZv4avS+O6LNIp8bq0= +github.com/Azure/azure-event-hubs-go/v3 v3.3.13 h1:aiI2RLjp0MzLCuFUXzR8b3h3bdPIc2c3vBYXRK8jX3E= +github.com/Azure/azure-event-hubs-go/v3 v3.3.13/go.mod h1:dJ/WqDn0KEJkNznL9UT/UbXzfmkffCjSNl9x2Y8JI28= github.com/Azure/azure-kusto-go v0.3.2 h1:XpS9co6GvEDl2oICF9HsjEsQVwEpRK6wbNWb9Z+uqsY= github.com/Azure/azure-kusto-go v0.3.2/go.mod h1:wd50n4qlsSxh+G4f80t+Fnl2ShK9AcXD+lMOstiKuYo= github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= @@ -67,18 +67,19 @@ github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiU github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v37.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v44.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v45.1.0+incompatible h1:kxtaPD8n2z5Za+9e3sKsYG2IX6PG2R6VXtgS7gAbh3A= github.com/Azure/azure-sdk-for-go v45.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v51.1.0+incompatible h1:7uk6GWtUqKg6weLv2dbKnzwb0ml1Qn70AdtRccZ543w= +github.com/Azure/azure-sdk-for-go v51.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y= 
github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= github.com/Azure/azure-storage-blob-go v0.13.0 h1:lgWHvFh+UYBNVQLFHXkvul2f6yOPA9PIH82RTG2cSwc= github.com/Azure/azure-storage-blob-go v0.13.0/go.mod h1:pA9kNqtjUeQF2zOSu4s//nUdBD+e64lEuc4sVnuOfNs= github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd h1:b3wyxBl3vvr15tUAziPBPK354y+LSdfPCpex5oBttHo= github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd/go.mod h1:K6am8mT+5iFXgingS9LUc7TmbsW6XBw3nxaRyaMyWc8= -github.com/Azure/go-amqp v0.12.6 h1:34yItuwhA/nusvq2sPSNPQxZLCf/CtaogYH8n578mnY= -github.com/Azure/go-amqp v0.12.6/go.mod h1:qApuH6OFTSKZFmCOxccvAv5rLizBQf4v8pRmG138DPo= +github.com/Azure/go-amqp v0.13.0/go.mod h1:qj+o8xPCz9tMSbQ83Vp8boHahuRDl5mkNHyt1xlxUTs= +github.com/Azure/go-amqp v0.13.12 h1:u/m0QvBgNVlcMqj4bPHxtEyANOzS+cXXndVMYGsC29A= +github.com/Azure/go-amqp v0.13.12/go.mod h1:D5ZrjQqB1dyp1A+G73xeL/kNn7D5qHJIIsNNps7YNmk= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= @@ -88,9 +89,11 @@ github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+B github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= github.com/Azure/go-autorest/autorest v0.10.0/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest v0.11.3/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest v0.11.4/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= -github.com/Azure/go-autorest/autorest v0.11.17 
h1:2zCdHwNgRH+St1J+ZMf66xI8aLr/5KMy+wWLH97zwYM= github.com/Azure/go-autorest/autorest v0.11.17/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= +github.com/Azure/go-autorest/autorest v0.11.18 h1:90Y4srNYrwOtAgVo3ndrQkTYn6kf1Eg/AjTFJ8Is2aM= +github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= @@ -99,6 +102,7 @@ github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQW github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/adal v0.9.10/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/adal v0.9.15 h1:X+p2GF0GWyOiSmqohIaEeuNFNDY4I4EOlVuUQvFdWMk= github.com/Azure/go-autorest/autorest/adal v0.9.15/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= @@ -117,10 +121,12 @@ github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/to v0.3.0 
h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8= github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= -github.com/Azure/go-autorest/autorest/validation v0.2.0 h1:15vMO4y76dehZSq7pAaOLQxC6dZYsSrj2GQpflyM/L4= +github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= +github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= +github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= +github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= @@ -463,8 +469,10 @@ github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892/go.mod h1:CTDl0pzVz github.com/denisenkom/go-mssqldb v0.10.0 h1:QykgLZBorFE95+gO3u9esLd0BmbvpWp0/waNNZfHBM8= github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= +github.com/devigned/tab v0.0.1/go.mod h1:oVYrfgGyond090gxCvvbjZji79+peOiSV6vhZhKJM0Y= github.com/devigned/tab v0.1.1 h1:3mD6Kb1mUOYeLpJvTVSDwSg5ZsfSxfvxGRTxRsJsITA= github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= +github.com/devigned/tab/opencensus v0.1.2/go.mod h1:U6xXMXnNwXJpdaK0mnT3zdng4WTi+vCfqn7YHofEv2A= github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible 
h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= From 779ed5ec42c09de94130435143bd86151e52666c Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 14 Sep 2021 14:42:59 -0700 Subject: [PATCH 052/176] chore: run ci tests in parallel and get test insights (#9686) --- .circleci/config.yml | 169 +++++++++++------- Makefile | 4 - scripts/install_gotestsum.sh | 46 +++++ .../{mac_installgo.sh => installgo_mac.sh} | 10 +- scripts/installgo_windows.sh | 25 +++ 5 files changed, 185 insertions(+), 69 deletions(-) create mode 100755 scripts/install_gotestsum.sh rename scripts/{mac_installgo.sh => installgo_mac.sh} (78%) create mode 100644 scripts/installgo_windows.sh diff --git a/.circleci/config.yml b/.circleci/config.yml index 3daec86da98b4..b2043e1fa291c 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,6 +1,6 @@ version: 2.1 orbs: - win: circleci/windows@2.4.0 + win: circleci/windows@2.4.0 aws-cli: circleci/aws-cli@1.4.0 executors: @@ -26,27 +26,84 @@ executors: commands: check-changed-files-or-halt: - steps: - - run: ./scripts/check-file-changes.sh - check-changed-files-or-halt-windows: - steps: - - run: - command: ./scripts/check-file-changes.sh - shell: bash.exe + steps: + - run: ./scripts/check-file-changes.sh test-go: parameters: - goarch: + os: + type: string + default: "linux" + gotestsum: type: string - default: "amd64" + default: "gotestsum" + cache_version: + type: string + default: "v3" steps: - checkout - check-changed-files-or-halt - - attach_workspace: - at: '/go' - - run: 'GOARCH=<< parameters.goarch >> make' - - run: 'GOARCH=<< parameters.goarch >> make check' - - run: 'GOARCH=<< parameters.goarch >> make check-deps' - - run: 'GOARCH=<< parameters.goarch >> make test' + - when: + condition: + equal: [ linux, << parameters.os >> ] + steps: + - restore_cache: + key: linux-go-<< 
parameters.cache_version >>-{{ checksum "go.sum" }} + - attach_workspace: + at: '/go' + - when: + condition: + equal: [ darwin, << parameters.os >> ] + steps: + - restore_cache: + key: darwin-go-<< parameters.cache_version >>-{{ checksum "go.sum" }} + - run: 'sh ./scripts/installgo_mac.sh' + - when: + condition: + equal: [ windows, << parameters.os >> ] + steps: + - run: rm -rf /c/Go + - restore_cache: + key: windows-go-<< parameters.cache_version >>-{{ checksum "go.sum" }} + - run: 'sh ./scripts/installgo_windows.sh' + - run: mkdir -p test-results + - run: ./scripts/install_gotestsum.sh << parameters.os >> << parameters.gotestsum >> + - run: | + PACKAGE_NAMES=$(go list ./... | circleci tests split --split-by=timings --timings-type=classname) + ./<< parameters.gotestsum >> --junitfile test-results/gotestsum-report.xml -- -short $PACKAGE_NAMES + - store_test_results: + path: test-results + - when: + condition: + equal: [ linux, << parameters.os >> ] + steps: + - save_cache: + name: 'Saving cache' + key: linux-go-<< parameters.cache_version >>-{{ checksum "go.sum" }} + paths: + - '~/go/src/github.com/influxdata/telegraf/gotestsum' + - when: + condition: + equal: [ darwin, << parameters.os >> ] + steps: + - save_cache: + name: 'Saving cache' + key: darwin-go-<< parameters.cache_version >>-{{ checksum "go.sum" }} + paths: + - '/go/src/github.com/influxdata/telegraf/gotestsum' + - '/usr/local/Cellar/go' + - '/usr/local/bin/go' + - '/usr/local/bin/gofmt' + - when: + condition: + equal: [ windows, << parameters.os >> ] + steps: + - save_cache: + name: 'Saving cache' + key: windows-go-<< parameters.cache_version >>-{{ checksum "go.sum" }} + paths: + - 'C:\Go' + - 'C:\Users\circleci\project\gotestsum.exe' + package-build: parameters: release: @@ -65,11 +122,11 @@ commands: at: '/go' - when: condition: << parameters.release >> - steps: + steps: - run: 'mips=1 mipsel=1 arm64=1 amd64=1 static=1 armel=1 armhf=1 s390x=1 ppc641e=1 i386=1 windows=1 darwin=1 make package' - when: 
condition: << parameters.nightly >> - steps: + steps: - run: 'mips=1 mipsel=1 arm64=1 amd64=1 static=1 armel=1 armhf=1 s390x=1 ppc641e=1 i386=1 windows=1 darwin=1 NIGHTLY=1 make package' - run: 'make upload-nightly' - unless: @@ -96,6 +153,8 @@ jobs: - check-changed-files-or-halt - run: 'make deps' - run: 'make tidy' + - run: 'make check' + - run: 'make check-deps' - save_cache: name: 'go module cache' key: go-mod-v1-{{ checksum "go.sum" }} @@ -109,51 +168,37 @@ jobs: executor: go-1_16 steps: - test-go + parallelism: 4 test-go-1_16-386: executor: go-1_16 steps: - - test-go: - goarch: "386" + - test-go + parallelism: 4 test-go-1_17: executor: go-1_17 steps: - test-go + parallelism: 4 test-go-1_17-386: executor: go-1_17 steps: - - test-go: - goarch: "386" + - test-go + parallelism: 4 test-go-mac: executor: mac steps: - - checkout - - restore_cache: - key: mac-go-mod-v0-{{ checksum "go.sum" }} - - check-changed-files-or-halt - - run: 'sh ./scripts/mac_installgo.sh' - - save_cache: - name: 'Saving cache' - key: mac-go-mod-v0-{{ checksum "go.sum" }} - paths: - - '/usr/local/Cellar/go' - - '/usr/local/bin/go' - - '/usr/local/bin/gofmt' - - run: 'make deps' - - run: 'make tidy' - - run: 'make' - - run: 'make check' - - run: 'make test' + - test-go: + os: darwin + parallelism: 4 test-go-windows: executor: name: win/default - shell: powershell.exe + shell: bash.exe steps: - - checkout - - check-changed-files-or-halt-windows - - run: choco upgrade golang --version=1.17.0 - - run: choco install make - - run: git config --system core.longpaths true - - run: make test-windows + - test-go: + os: windows + gotestsum: "gotestsum.exe" + parallelism: 4 windows-package: executor: go-1_17 @@ -283,14 +328,14 @@ jobs: command: | echo "Go tests complete." 
share-artifacts: - executor: aws-cli/default + executor: aws-cli/default steps: - run: command: | PR=${CIRCLE_PULL_REQUEST##*/} printf -v payload '{ "pullRequestNumber": "%s" }' "$PR" - curl -X POST "https://182c7jdgog.execute-api.us-east-1.amazonaws.com/prod/shareArtifacts" --data "$payload" - + curl -X POST "https://182c7jdgog.execute-api.us-east-1.amazonaws.com/prod/shareArtifacts" --data "$payload" + workflows: version: 2 check: @@ -331,47 +376,47 @@ workflows: filters: tags: only: /.*/ - - 'test-awaiter': + - 'test-awaiter': requires: - 'test-go-1_16' - 'test-go-1_16-386' - 'test-go-1_17' - 'test-go-1_17-386' - 'windows-package': - requires: + requires: - 'test-go-windows' - 'darwin-package': - requires: + requires: - 'test-go-mac' - 'i386-package': - requires: + requires: - 'test-awaiter' - 'ppc641e-package': - requires: + requires: - 'test-awaiter' - 's390x-package': - requires: + requires: - 'test-awaiter' - 'armel-package': - requires: + requires: - 'test-awaiter' - 'amd64-package': - requires: + requires: - 'test-awaiter' - 'arm64-package': - requires: + requires: - 'test-awaiter' - 'armhf-package': - requires: + requires: - 'test-awaiter' - 'static-package': requires: - 'test-awaiter' - 'mipsel-package': - requires: + requires: - 'test-awaiter' - 'mips-package': - requires: + requires: - 'test-awaiter' - 'share-artifacts': requires: @@ -412,7 +457,7 @@ workflows: only: /.*/ - 'package-sign-mac': requires: - - 'package-sign-windows' + - 'package-sign-windows' filters: tags: only: /.*/ diff --git a/Makefile b/Makefile index f0bb01dd2a35e..230eedf600f6f 100644 --- a/Makefile +++ b/Makefile @@ -119,10 +119,6 @@ fmtcheck: exit 1 ;\ fi -.PHONY: test-windows -test-windows: - go test -short ./... - .PHONY: vet vet: @echo 'go vet $$(go list ./... 
| grep -v ./plugins/parsers/influx)' diff --git a/scripts/install_gotestsum.sh b/scripts/install_gotestsum.sh new file mode 100755 index 0000000000000..0b813e20879fa --- /dev/null +++ b/scripts/install_gotestsum.sh @@ -0,0 +1,46 @@ +#!/bin/sh + +set -eux + +OS=$1 +EXE=$2 +VERSION="1.7.0" + +WINDOWS_SHA="7ae12ddb171375f0c14d6a09dd27a5c1d1fc72edeea674e3d6e7489a533b40c1" +DARWIN_SHA="a8e2351604882af1a67601cbeeacdcfa9b17fc2f6fbac291cf5d434efdf2d85b" +LINUX_SHA="b5c98cc408c75e76a097354d9487dca114996e821b3af29a0442aa6c9159bd40" + +setup_gotestsum () { + echo "installing gotestsum" + curl -L "https://github.com/gotestyourself/gotestsum/releases/download/v${VERSION}/gotestsum_${VERSION}_${OS}_amd64.tar.gz" --output gotestsum.tar.gz + + if [ "$OS" = "windows" ]; then + SHA=$WINDOWS_SHA + SHATOOL="sha256sum" + elif [ "$OS" = "darwin" ]; then + SHA=$DARWIN_SHA + SHATOOL="shasum --algorithm 256" + elif [ "$OS" = "linux" ]; then + SHA=$LINUX_SHA + SHATOOL="sha256sum" + fi + + if ! echo "${SHA} gotestsum.tar.gz" | ${SHATOOL} --check -; then + echo "Checksum failed" >&2 + exit 1 + fi + + tar --extract --file=gotestsum.tar.gz "${EXE}" +} + +if test -f "${EXE}"; then + echo "gotestsum is already installed" + v=$(./"${EXE}" --version) + echo "$v is installed, required version is ${VERSION}" + if [ "$v" != "gotestsum version ${VERSION}" ]; then + setup_gotestsum + ${EXE} --version + fi +else + setup_gotestsum +fi diff --git a/scripts/mac_installgo.sh b/scripts/installgo_mac.sh similarity index 78% rename from scripts/mac_installgo.sh rename to scripts/installgo_mac.sh index cb41ee5f666cd..b839358136d98 100644 --- a/scripts/mac_installgo.sh +++ b/scripts/installgo_mac.sh @@ -14,16 +14,20 @@ path="/usr/local/Cellar" setup_go () { echo "installing go" curl -L https://golang.org/dl/go${GO_VERSION}.${GO_ARCH}.tar.gz --output go${GO_VERSION}.${GO_ARCH}.tar.gz - echo "${GO_VERSION_SHA} go${GO_VERSION}.${GO_ARCH}.tar.gz" | shasum -a 256 --check + if ! 
echo "${GO_VERSION_SHA} go${GO_VERSION}.${GO_ARCH}.tar.gz" | shasum --algorithm 256 --check -; then + echo "Checksum failed" >&2 + exit 1 + fi + sudo rm -rf ${path}/go sudo tar -C $path -xzf go${GO_VERSION}.${GO_ARCH}.tar.gz ln -sf ${path}/go/bin/go /usr/local/bin/go ln -sf ${path}/go/bin/gofmt /usr/local/bin/gofmt } -if command -v go &> /dev/null; then +if command -v go >/dev/null 2>&1; then echo "Go is already installed" - v=`go version | { read _ _ v _; echo ${v#go}; }` + v=$(go version | { read -r _ _ v _; echo "${v#go}"; }) echo "$v is installed, required version is ${GO_VERSION}" if [ "$v" != ${GO_VERSION} ]; then setup_go diff --git a/scripts/installgo_windows.sh b/scripts/installgo_windows.sh new file mode 100644 index 0000000000000..d035447570c8a --- /dev/null +++ b/scripts/installgo_windows.sh @@ -0,0 +1,25 @@ +#!/bin/sh + +set -eux + +GO_VERSION="1.17" + +setup_go () { + choco upgrade golang --version=${GO_VERSION} + choco install make + git config --system core.longpaths true + rm -rf /c/Go + cp -r /c/Program\ Files/Go /c/ +} + +if command -v go >/dev/null 2>&1; then + echo "Go is already installed" + v=$(go version | { read -r _ _ v _; echo "${v#go}"; }) + echo "$v is installed, required version is ${GO_VERSION}" + if [ "$v" != ${GO_VERSION} ]; then + setup_go + go version + fi +else + setup_go +fi From 0e9391d43fbef7857b197ee4f903f975d35d3bde Mon Sep 17 00:00:00 2001 From: John Seekins Date: Wed, 15 Sep 2021 11:56:52 -0600 Subject: [PATCH 053/176] feat: add count of bonded slaves (for easier alerting) (#9762) --- plugins/inputs/bond/README.md | 6 ++++++ plugins/inputs/bond/bond.go | 9 +++++++++ plugins/inputs/bond/bond_test.go | 1 + 3 files changed, 16 insertions(+) diff --git a/plugins/inputs/bond/README.md b/plugins/inputs/bond/README.md index abcf72c9193ca..d905038a9d533 100644 --- a/plugins/inputs/bond/README.md +++ b/plugins/inputs/bond/README.md @@ -27,6 +27,7 @@ The plugin collects these metrics from `/proc/net/bonding/*` files. 
- bond_slave - failures - status + - count ### Description: @@ -39,6 +40,9 @@ status failures Amount of failures for bond's slave interface. + +count + Number of slaves attached to bond ``` ### Tags: @@ -79,7 +83,9 @@ Output: > bond,bond=bond1,host=local active_slave="eth0",status=1i 1509704525000000000 > bond_slave,bond=bond1,interface=eth0,host=local status=1i,failures=0i 1509704525000000000 > bond_slave,host=local,bond=bond1,interface=eth1 status=1i,failures=0i 1509704525000000000 +> bond_slave,host=local,bond=bond1 count=2i 1509704525000000000 > bond,bond=bond0,host=isvetlov-mac.local status=1i 1509704525000000000 > bond_slave,bond=bond0,interface=eth1,host=local status=1i,failures=0i 1509704525000000000 > bond_slave,bond=bond0,interface=eth2,host=local status=1i,failures=0i 1509704525000000000 +> bond_slave,bond=bond0,host=local count=2i 1509704525000000000 ``` diff --git a/plugins/inputs/bond/bond.go b/plugins/inputs/bond/bond.go index b71f36e629feb..dc9b083ec5af9 100644 --- a/plugins/inputs/bond/bond.go +++ b/plugins/inputs/bond/bond.go @@ -122,6 +122,7 @@ func (bond *Bond) gatherBondPart(bondName string, rawFile string, acc telegraf.A func (bond *Bond) gatherSlavePart(bondName string, rawFile string, acc telegraf.Accumulator) error { var slave string var status int + var slaveCount int scanner := bufio.NewScanner(strings.NewReader(rawFile)) for scanner.Scan() { @@ -155,8 +156,16 @@ func (bond *Bond) gatherSlavePart(bondName string, rawFile string, acc telegraf. 
"interface": slave, } acc.AddFields("bond_slave", fields, tags) + slaveCount++ } } + fields := map[string]interface{}{ + "count": slaveCount, + } + tags := map[string]string{ + "bond": bondName, + } + acc.AddFields("bond_slave", fields, tags) return scanner.Err() } diff --git a/plugins/inputs/bond/bond_test.go b/plugins/inputs/bond/bond_test.go index 342a3f4eb831d..8dc24f4cafa45 100644 --- a/plugins/inputs/bond/bond_test.go +++ b/plugins/inputs/bond/bond_test.go @@ -75,4 +75,5 @@ func TestGatherBondInterface(t *testing.T) { acc.AssertContainsTaggedFields(t, "bond", map[string]interface{}{"active_slave": "eth2", "status": 1}, map[string]string{"bond": "bondAB"}) acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 2, "status": 0}, map[string]string{"bond": "bondAB", "interface": "eth3"}) acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 0, "status": 1}, map[string]string{"bond": "bondAB", "interface": "eth2"}) + acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"count": 2}, map[string]string{"bond": "bondAB"}) } From c076398440971c01f67eb326c434c1eab1c361b2 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Wed, 15 Sep 2021 19:58:40 +0200 Subject: [PATCH 054/176] fix: Fix panic for non-existing metric names (#9757) --- plugins/parsers/registry.go | 2 +- plugins/parsers/xpath/parser.go | 26 ++-- plugins/parsers/xpath/parser_test.go | 189 ++++++++++++++++++--------- 3 files changed, 141 insertions(+), 76 deletions(-) diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index cc2102c9532d2..f07c789a272f1 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -395,7 +395,7 @@ func NewXPathParserConfigs(metricName string, cfgs []XPathConfig) []xpath.Config configs := make([]xpath.Config, 0, len(cfgs)) for _, cfg := range cfgs { config := xpath.Config(cfg) - config.MetricName = metricName + 
config.MetricDefaultName = metricName configs = append(configs, config) } return configs diff --git a/plugins/parsers/xpath/parser.go b/plugins/parsers/xpath/parser.go index 52224530a9250..75ebfd92035c1 100644 --- a/plugins/parsers/xpath/parser.go +++ b/plugins/parsers/xpath/parser.go @@ -35,14 +35,14 @@ type Parser struct { } type Config struct { - MetricName string - MetricQuery string `toml:"metric_name"` - Selection string `toml:"metric_selection"` - Timestamp string `toml:"timestamp"` - TimestampFmt string `toml:"timestamp_format"` - Tags map[string]string `toml:"tags"` - Fields map[string]string `toml:"fields"` - FieldsInt map[string]string `toml:"fields_int"` + MetricDefaultName string `toml:"-"` + MetricQuery string `toml:"metric_name"` + Selection string `toml:"metric_selection"` + Timestamp string `toml:"timestamp"` + TimestampFmt string `toml:"timestamp_format"` + Tags map[string]string `toml:"tags"` + Fields map[string]string `toml:"fields"` + FieldsInt map[string]string `toml:"fields_int"` FieldSelection string `toml:"field_selection"` FieldNameQuery string `toml:"field_name"` @@ -160,13 +160,19 @@ func (p *Parser) parseQuery(starttime time.Time, doc, selected dataNode, config // Determine the metric name. If a query was specified, use the result of this query and the default metric name // otherwise. - metricname = config.MetricName + metricname = config.MetricDefaultName if len(config.MetricQuery) > 0 { v, err := p.executeQuery(doc, selected, config.MetricQuery) if err != nil { return nil, fmt.Errorf("failed to query metric name: %v", err) } - metricname = v.(string) + var ok bool + if metricname, ok = v.(string); !ok { + if v == nil { + p.Log.Infof("Hint: Empty metric-name-node. 
If you wanted to set a constant please use `metric_name = \"'name'\"`.") + } + return nil, fmt.Errorf("failed to query metric name: query result is of type %T not 'string'", v) + } } // By default take the time the parser was invoked and override the value diff --git a/plugins/parsers/xpath/parser_test.go b/plugins/parsers/xpath/parser_test.go index 46e4dba690102..8e7a3087c0888 100644 --- a/plugins/parsers/xpath/parser_test.go +++ b/plugins/parsers/xpath/parser_test.go @@ -148,8 +148,8 @@ func TestInvalidTypeQueriesFail(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", FieldsInt: map[string]string{ "a": "/Device_1/value_string", }, @@ -185,8 +185,8 @@ func TestInvalidTypeQueries(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", Fields: map[string]string{ "a": "number(/Device_1/value_string)", }, @@ -207,8 +207,8 @@ func TestInvalidTypeQueries(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", Fields: map[string]string{ "a": "boolean(/Device_1/value_string)", }, @@ -252,8 +252,8 @@ func TestParseTimestamps(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", }, }, defaultTags: map[string]string{}, @@ -269,9 +269,9 @@ func TestParseTimestamps(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", - TimestampFmt: "unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", + TimestampFmt: 
"unix", }, }, defaultTags: map[string]string{}, @@ -287,9 +287,9 @@ func TestParseTimestamps(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix_ms", - TimestampFmt: "unix_ms", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix_ms", + TimestampFmt: "unix_ms", }, }, defaultTags: map[string]string{}, @@ -305,9 +305,9 @@ func TestParseTimestamps(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix_us", - TimestampFmt: "unix_us", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix_us", + TimestampFmt: "unix_us", }, }, defaultTags: map[string]string{}, @@ -323,9 +323,9 @@ func TestParseTimestamps(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix_ns", - TimestampFmt: "unix_ns", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix_ns", + TimestampFmt: "unix_ns", }, }, defaultTags: map[string]string{}, @@ -341,9 +341,9 @@ func TestParseTimestamps(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_iso", - TimestampFmt: "2006-01-02T15:04:05Z", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_iso", + TimestampFmt: "2006-01-02T15:04:05Z", }, }, defaultTags: map[string]string{}, @@ -382,8 +382,8 @@ func TestParseSingleValues(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", Fields: map[string]string{ "a": "/Device_1/value_int", "b": "/Device_1/value_float", @@ -410,8 +410,8 @@ func TestParseSingleValues(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + Timestamp: 
"/Device_1/Timestamp_unix", Fields: map[string]string{ "a": "number(Device_1/value_int)", "b": "number(/Device_1/value_float)", @@ -438,8 +438,8 @@ func TestParseSingleValues(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", Fields: map[string]string{ "b": "number(/Device_1/value_float)", "c": "boolean(/Device_1/value_bool)", @@ -468,8 +468,8 @@ func TestParseSingleValues(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", Fields: map[string]string{ "x": "substring-before(/Device_1/value_position, ';')", "y": "substring-after(/Device_1/value_position, ';')", @@ -492,8 +492,8 @@ func TestParseSingleValues(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", Fields: map[string]string{ "x": "number(substring-before(/Device_1/value_position, ';'))", "y": "number(substring-after(/Device_1/value_position, ';'))", @@ -516,8 +516,8 @@ func TestParseSingleValues(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", FieldsInt: map[string]string{ "x": "substring-before(/Device_1/value_position, ';')", "y": "substring-after(/Device_1/value_position, ';')", @@ -540,8 +540,8 @@ func TestParseSingleValues(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", Tags: map[string]string{ "state": "/Device_1/State", "name": "substring-after(/Device_1/Name, ' ')", 
@@ -587,8 +587,8 @@ func TestParseSingleAttributes(t *testing.T) { input: singleMetricAttributesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix/@value", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", }, }, defaultTags: map[string]string{}, @@ -604,9 +604,9 @@ func TestParseSingleAttributes(t *testing.T) { input: singleMetricAttributesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_iso/@value", - TimestampFmt: "2006-01-02T15:04:05Z", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_iso/@value", + TimestampFmt: "2006-01-02T15:04:05Z", }, }, defaultTags: map[string]string{}, @@ -622,8 +622,8 @@ func TestParseSingleAttributes(t *testing.T) { input: singleMetricAttributesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix/@value", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", Fields: map[string]string{ "a": "/Device_1/attr_int/@_", "b": "/Device_1/attr_float/@_", @@ -650,8 +650,8 @@ func TestParseSingleAttributes(t *testing.T) { input: singleMetricAttributesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix/@value", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", Fields: map[string]string{ "a": "number(/Device_1/attr_int/@_)", "b": "number(/Device_1/attr_float/@_)", @@ -678,8 +678,8 @@ func TestParseSingleAttributes(t *testing.T) { input: singleMetricAttributesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix/@value", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", Fields: map[string]string{ "b": "number(/Device_1/attr_float/@_)", "c": "boolean(/Device_1/attr_bool/@_)", @@ -708,8 +708,8 @@ func TestParseSingleAttributes(t *testing.T) { input: singleMetricAttributesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix/@value", + 
MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", Fields: map[string]string{ "name": "substring-after(/Device_1/Name/@value, ' ')", }, @@ -730,8 +730,8 @@ func TestParseSingleAttributes(t *testing.T) { input: singleMetricAttributesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix/@value", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", Tags: map[string]string{ "state": "/Device_1/State/@_", "name": "substring-after(/Device_1/Name/@value, ' ')", @@ -754,8 +754,8 @@ func TestParseSingleAttributes(t *testing.T) { input: singleMetricAttributesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix/@value", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", Fields: map[string]string{ "a": "/Device_1/attr_bool_numeric/@_ = 1", }, @@ -799,8 +799,8 @@ func TestParseMultiValues(t *testing.T) { input: singleMetricMultiValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Timestamp/@value", + MetricDefaultName: "test", + Timestamp: "/Timestamp/@value", Fields: map[string]string{ "a": "number(/Device/Value[1])", "b": "number(/Device/Value[2])", @@ -831,8 +831,8 @@ func TestParseMultiValues(t *testing.T) { input: singleMetricMultiValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Timestamp/@value", + MetricDefaultName: "test", + Timestamp: "/Timestamp/@value", FieldsInt: map[string]string{ "a": "/Device/Value[1]", "b": "/Device/Value[2]", @@ -886,9 +886,9 @@ func TestParseMultiNodes(t *testing.T) { input: multipleNodesXML, configs: []Config{ { - MetricName: "test", - Selection: "/Device", - Timestamp: "/Timestamp/@value", + MetricDefaultName: "test", + Selection: "/Device", + Timestamp: "/Timestamp/@value", Fields: map[string]string{ "value": "number(Value)", "active": "Active = 1", @@ -999,9 +999,9 @@ func TestParseMetricQuery(t *testing.T) { input: metricNameQueryXML, configs: []Config{ { - 
MetricName: "test", - MetricQuery: "name(/Device_1/Metric/@*[1])", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + MetricQuery: "name(/Device_1/Metric/@*[1])", + Timestamp: "/Device_1/Timestamp_unix", Fields: map[string]string{ "value": "/Device_1/Metric/@*[1]", }, @@ -1017,6 +1017,29 @@ func TestParseMetricQuery(t *testing.T) { time.Unix(1577923199, 0), ), }, + { + name: "parse metric name constant", + input: metricNameQueryXML, + configs: []Config{ + { + MetricDefaultName: "test", + MetricQuery: "'the_metric'", + Timestamp: "/Device_1/Timestamp_unix", + Fields: map[string]string{ + "value": "/Device_1/Metric/@*[1]", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "the_metric", + map[string]string{}, + map[string]interface{}{ + "value": "ok", + }, + time.Unix(1577923199, 0), + ), + }, } for _, tt := range tests { @@ -1032,6 +1055,42 @@ func TestParseMetricQuery(t *testing.T) { } } +func TestParseErrors(t *testing.T) { + var tests = []struct { + name string + input string + configs []Config + expected string + }{ + { + name: "string metric name query", + input: metricNameQueryXML, + configs: []Config{ + { + MetricDefaultName: "test", + MetricQuery: "arbitrary", + Timestamp: "/Device_1/Timestamp_unix", + Fields: map[string]string{ + "value": "/Device_1/Metric/@*[1]", + }, + }, + }, + expected: "failed to query metric name: query result is of type not 'string'", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser := &Parser{Configs: tt.configs, DefaultTags: map[string]string{}, Log: testutil.Logger{Name: "parsers.xml"}} + require.NoError(t, parser.Init()) + + _, err := parser.ParseLine(tt.input) + require.Error(t, err) + require.Equal(t, tt.expected, err.Error()) + }) + } +} + func TestEmptySelection(t *testing.T) { var tests = []struct { name string @@ -1146,7 +1205,7 @@ func TestTestCases(t *testing.T) { filename := filepath.FromSlash(tt.filename) cfg, header, err := 
loadTestConfiguration(filename) require.NoError(t, err) - cfg.MetricName = "xml" + cfg.MetricDefaultName = "xml" // Load the xml-content input, err := testutil.ParseRawLinesFrom(header, "File:") From 5fe738723c8ba4e5ddb67d8b1c886148d1a27b63 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 15 Sep 2021 11:59:16 -0600 Subject: [PATCH 055/176] fix: bump google.golang.org/grpc from 1.39.1 to 1.40.0 (#9751) From 783945e55d03d36059ac1bd2da9551ee29c5ebb0 Mon Sep 17 00:00:00 2001 From: reimda Date: Wed, 15 Sep 2021 15:35:23 -0600 Subject: [PATCH 056/176] Update release.sh to include new builds on website (#9765) --- scripts/release.sh | 38 +++++++++++++++++++++++++++++++++++--- 1 file changed, 35 insertions(+), 3 deletions(-) diff --git a/scripts/release.sh b/scripts/release.sh index b445efc0494b3..22cac0a09cf53 100644 --- a/scripts/release.sh +++ b/scripts/release.sh @@ -103,6 +103,16 @@ do done < manifest echo "" +package="$(grep *_darwin_amd64.dmg manifest | cut -f2 -d' ')" +cat -< Date: Thu, 16 Sep 2021 12:01:34 -0400 Subject: [PATCH 057/176] fix: bump github.com/antchfx/xmlquery from 1.3.5 to 1.3.6 (#9750) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 4dcb4550d4764..24b6f664b2756 100644 --- a/go.mod +++ b/go.mod @@ -39,7 +39,7 @@ require ( github.com/aliyun/alibaba-cloud-sdk-go v1.61.1004 github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 github.com/antchfx/jsonquery v1.1.4 - github.com/antchfx/xmlquery v1.3.5 + github.com/antchfx/xmlquery v1.3.6 github.com/antchfx/xpath v1.1.11 github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230 // indirect github.com/apache/thrift v0.14.2 diff --git a/go.sum b/go.sum index c5a0778443420..e09c7b36318ff 100644 --- a/go.sum +++ b/go.sum @@ -219,8 +219,8 @@ github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RD github.com/andreyvit/diff 
v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/antchfx/jsonquery v1.1.4 h1:+OlFO3QS9wjU0MKx9MgHm5f6o6hdd4e9mUTp0wTjxlM= github.com/antchfx/jsonquery v1.1.4/go.mod h1:cHs8r6Bymd8j6HI6Ej1IJbjahKvLBcIEh54dfmo+E9A= -github.com/antchfx/xmlquery v1.3.5 h1:I7TuBRqsnfFuL11ruavGm911Awx9IqSdiU6W/ztSmVw= -github.com/antchfx/xmlquery v1.3.5/go.mod h1:64w0Xesg2sTaawIdNqMB+7qaW/bSqkQm+ssPaCMWNnc= +github.com/antchfx/xmlquery v1.3.6 h1:kaEVzH1mNo/2AJZrhZjAaAUTy2Nn2zxGfYYU8jWfXOo= +github.com/antchfx/xmlquery v1.3.6/go.mod h1:64w0Xesg2sTaawIdNqMB+7qaW/bSqkQm+ssPaCMWNnc= github.com/antchfx/xpath v1.1.7/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk= github.com/antchfx/xpath v1.1.10/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk= github.com/antchfx/xpath v1.1.11 h1:WOFtK8TVAjLm3lbgqeP0arlHpvCEeTANeWZ/csPpJkQ= From a02f49c6ff5b43955e117f0a1290f2c4b6543d45 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 16 Sep 2021 10:02:47 -0600 Subject: [PATCH 058/176] fix: bump github.com/miekg/dns from 1.1.31 to 1.1.43 (#9656) --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 24b6f664b2756..65a894974bea6 100644 --- a/go.mod +++ b/go.mod @@ -180,7 +180,7 @@ require ( github.com/mdlayher/genetlink v1.0.0 // indirect github.com/mdlayher/netlink v1.1.0 // indirect github.com/microsoft/ApplicationInsights-Go v0.4.4 - github.com/miekg/dns v1.1.31 + github.com/miekg/dns v1.1.43 github.com/minio/highwayhash v1.0.1 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.2.2 // indirect diff --git a/go.sum b/go.sum index e09c7b36318ff..5ff7799dc902b 100644 --- a/go.sum +++ b/go.sum @@ -1146,8 +1146,9 @@ github.com/microsoft/ApplicationInsights-Go v0.4.4 h1:G4+H9WNs6ygSCe6sUyxRc2U81T github.com/microsoft/ApplicationInsights-Go v0.4.4/go.mod 
h1:fKRUseBqkw6bDiXTs3ESTiU/4YTIHsQS4W3fP2ieF4U= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/miekg/dns v1.1.31 h1:sJFOl9BgwbYAWOGEwr61FU28pqsBNdpRBnhGXtO06Oo= github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg= +github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721 h1:RlZweED6sbSArvlE924+mUcZuXKLBHA35U7LN621Bws= github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721/go.mod h1:Ickgr2WtCLZ2MDGd4Gr0geeCH5HybhRJbonOgQpvSxc= From 81eed8d436ace8b896f23c4fdc2a08a568a686aa Mon Sep 17 00:00:00 2001 From: Alan Pope Date: Thu, 16 Sep 2021 18:22:24 +0100 Subject: [PATCH 059/176] docs: Move nightly builds (#9747) --- README.md | 41 +---------------------------------------- docs/NIGHTLIES.md | 42 ++++++++++++++++++++++++++++++++++++++++++ docs/README.md | 3 +++ 3 files changed, 46 insertions(+), 40 deletions(-) create mode 100644 docs/NIGHTLIES.md diff --git a/README.md b/README.md index c4a89b751c5d2..57b2d4e8cc33d 100644 --- a/README.md +++ b/README.md @@ -80,46 +80,7 @@ version. 
### Nightly Builds -These builds are generated from the master branch: - -FreeBSD - .tar.gz -- [telegraf-nightly_freebsd_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_amd64.tar.gz) -- [telegraf-nightly_freebsd_armv7.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_armv7.tar.gz) -- [telegraf-nightly_freebsd_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_i386.tar.gz) - -Linux - .rpm -- [telegraf-nightly.arm64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.arm64.rpm) -- [telegraf-nightly.armel.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armel.rpm) -- [telegraf-nightly.armv6hl.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armv6hl.rpm) -- [telegraf-nightly.i386.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.i386.rpm) -- [telegraf-nightly.ppc64le.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.ppc64le.rpm) -- [telegraf-nightly.s390x.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.s390x.rpm) -- [telegraf-nightly.x86_64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.x86_64.rpm) - -Linux - .deb -- [telegraf_nightly_amd64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_amd64.deb) -- [telegraf_nightly_arm64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_arm64.deb) -- [telegraf_nightly_armel.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armel.deb) -- [telegraf_nightly_armhf.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armhf.deb) -- [telegraf_nightly_i386.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_i386.deb) -- [telegraf_nightly_ppc64el.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_ppc64el.deb) -- [telegraf_nightly_s390x.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_s390x.deb) - -Linux - 
.tar.gz -- [telegraf-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_amd64.tar.gz) -- [telegraf-nightly_linux_arm64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_arm64.tar.gz) -- [telegraf-nightly_linux_armel.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armel.tar.gz) -- [telegraf-nightly_linux_armhf.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armhf.tar.gz) -- [telegraf-nightly_linux_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_i386.tar.gz) -- [telegraf-nightly_linux_s390x.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_s390x.tar.gz) -- [telegraf-static-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-static-nightly_linux_amd64.tar.gz) - -OSX - .tar.gz -- [telegraf-nightly_darwin_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_darwin_amd64.tar.gz) - -Windows - .zip -- [telegraf-nightly_windows_i386.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_i386.zip) -- [telegraf-nightly_windows_amd64.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_amd64.zip) +[Nightly](/docs/NIGHTLIES.md) builds are available, generated from the master branch. 
## How to use it: diff --git a/docs/NIGHTLIES.md b/docs/NIGHTLIES.md new file mode 100644 index 0000000000000..63cdc2d82cfdc --- /dev/null +++ b/docs/NIGHTLIES.md @@ -0,0 +1,42 @@ +### Nightly Builds + +These builds are generated from the master branch: + +FreeBSD - .tar.gz +- [telegraf-nightly_freebsd_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_amd64.tar.gz) +- [telegraf-nightly_freebsd_armv7.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_armv7.tar.gz) +- [telegraf-nightly_freebsd_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_i386.tar.gz) + +Linux - .rpm +- [telegraf-nightly.arm64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.arm64.rpm) +- [telegraf-nightly.armel.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armel.rpm) +- [telegraf-nightly.armv6hl.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armv6hl.rpm) +- [telegraf-nightly.i386.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.i386.rpm) +- [telegraf-nightly.ppc64le.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.ppc64le.rpm) +- [telegraf-nightly.s390x.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.s390x.rpm) +- [telegraf-nightly.x86_64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.x86_64.rpm) + +Linux - .deb +- [telegraf_nightly_amd64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_amd64.deb) +- [telegraf_nightly_arm64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_arm64.deb) +- [telegraf_nightly_armel.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armel.deb) +- [telegraf_nightly_armhf.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armhf.deb) +- [telegraf_nightly_i386.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_i386.deb) +- 
[telegraf_nightly_ppc64el.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_ppc64el.deb) +- [telegraf_nightly_s390x.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_s390x.deb) + +Linux - .tar.gz +- [telegraf-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_amd64.tar.gz) +- [telegraf-nightly_linux_arm64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_arm64.tar.gz) +- [telegraf-nightly_linux_armel.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armel.tar.gz) +- [telegraf-nightly_linux_armhf.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armhf.tar.gz) +- [telegraf-nightly_linux_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_i386.tar.gz) +- [telegraf-nightly_linux_s390x.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_s390x.tar.gz) +- [telegraf-static-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-static-nightly_linux_amd64.tar.gz) + +OSX - .tar.gz +- [telegraf-nightly_darwin_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_darwin_amd64.tar.gz) + +Windows - .zip +- [telegraf-nightly_windows_i386.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_i386.zip) +- [telegraf-nightly_windows_amd64.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_amd64.zip) diff --git a/docs/README.md b/docs/README.md index b7b55336c5a04..99320dee95588 100644 --- a/docs/README.md +++ b/docs/README.md @@ -10,6 +10,8 @@ - [Profiling][profiling] - [Windows Service][winsvc] - [FAQ][faq] +- Developer Builds + - [Nightlies](nightlies) [conf]: /docs/CONFIGURATION.md [metrics]: /docs/METRICS.md @@ -19,3 +21,4 @@ [profiling]: /docs/PROFILING.md [winsvc]: /docs/WINDOWS_SERVICE.md [faq]: /docs/FAQ.md +[nightlies]: /docs/NIGHTLIES.md \ No newline at end of file From 
b806ad88488b057b9864d7365e24b1651726caa3 Mon Sep 17 00:00:00 2001 From: Michael Hall Date: Thu, 16 Sep 2021 14:14:41 -0400 Subject: [PATCH 060/176] docs: Add list of 3rd party builds to the README (#8576) --- README.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/README.md b/README.md index 57b2d4e8cc33d..2b49842789db6 100644 --- a/README.md +++ b/README.md @@ -82,6 +82,17 @@ version. [Nightly](/docs/NIGHTLIES.md) builds are available, generated from the master branch. +### 3rd Party Builds + +Builds for other platforms or package formats are provided by members of the Telegraf community. These packages are not built, tested or supported by the Telegraf project or InfluxData, we make no guarantees that they will work. Please get in touch with the package author if you need support. + +* Windows + * [Chocolatey](https://chocolatey.org/packages/telegraf) by [ripclawffb](https://chocolatey.org/profiles/ripclawffb) + * [Scoop](https://github.com/ScoopInstaller/Main/blob/master/bucket/telegraf.json) +* Linux + * [Snap](https://snapcraft.io/telegraf) by Laurent Sesquès (sajoupa) + + ## How to use it: See usage with: From f5a3df429ad969302c765da5dce92f4c63042f37 Mon Sep 17 00:00:00 2001 From: John Seekins Date: Thu, 16 Sep 2021 15:19:51 -0600 Subject: [PATCH 061/176] fix: add additional logstash output plugin stats (#9707) --- plugins/inputs/logstash/README.md | 6 ++ plugins/inputs/logstash/logstash.go | 65 +++++++++++++++++++- plugins/inputs/logstash/logstash_test.go | 58 +++++++++++++++++ plugins/inputs/logstash/samples_logstash7.go | 7 ++- 4 files changed, 131 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/logstash/README.md b/plugins/inputs/logstash/README.md index 9571de5fd8873..95ec3e6feae66 100644 --- a/plugins/inputs/logstash/README.md +++ b/plugins/inputs/logstash/README.md @@ -42,6 +42,8 @@ Logstash 5 and later is supported. 
### Metrics +Additional plugin stats may be collected (because logstash doesn't consistently expose all stats) + - logstash_jvm - tags: - node_id @@ -125,6 +127,10 @@ Logstash 5 and later is supported. - duration_in_millis - in - out + - bulk_requests_failures (for Logstash 7+) + - bulk_requests_with_errors (for Logstash 7+) + - documents_successes (for logstash 7+) + - documents_retryable_failures (for logstash 7+) - logstash_queue - tags: diff --git a/plugins/inputs/logstash/logstash.go b/plugins/inputs/logstash/logstash.go index 76f75bc63a6a0..10a3e7b6b8dd0 100644 --- a/plugins/inputs/logstash/logstash.go +++ b/plugins/inputs/logstash/logstash.go @@ -126,9 +126,11 @@ type Pipeline struct { } type Plugin struct { - ID string `json:"id"` - Events interface{} `json:"events"` - Name string `json:"name"` + ID string `json:"id"` + Events interface{} `json:"events"` + Name string `json:"name"` + BulkRequests map[string]interface{} `json:"bulk_requests"` + Documents map[string]interface{} `json:"documents"` } type PipelinePlugins struct { @@ -290,6 +292,63 @@ func (logstash *Logstash) gatherPluginsStats( return err } accumulator.AddFields("logstash_plugins", flattener.Fields, pluginTags) + /* + The elasticsearch output produces additional stats around + bulk requests and document writes (that are elasticsearch specific). 
+ Collect those here + */ + if pluginType == "output" && plugin.Name == "elasticsearch" { + /* + The "bulk_requests" section has details about batch writes + into Elasticsearch + + "bulk_requests" : { + "successes" : 2870, + "responses" : { + "200" : 2870 + }, + "failures": 262, + "with_errors": 9089 + }, + */ + flattener := jsonParser.JSONFlattener{} + err := flattener.FlattenJSON("", plugin.BulkRequests) + if err != nil { + return err + } + for k, v := range flattener.Fields { + if strings.HasPrefix(k, "bulk_requests") { + continue + } + newKey := fmt.Sprintf("bulk_requests_%s", k) + flattener.Fields[newKey] = v + delete(flattener.Fields, k) + } + accumulator.AddFields("logstash_plugins", flattener.Fields, pluginTags) + + /* + The "documents" section has counts of individual documents + written/retried/etc. + "documents" : { + "successes" : 2665549, + "retryable_failures": 13733 + } + */ + flattener = jsonParser.JSONFlattener{} + err = flattener.FlattenJSON("", plugin.Documents) + if err != nil { + return err + } + for k, v := range flattener.Fields { + if strings.HasPrefix(k, "documents") { + continue + } + newKey := fmt.Sprintf("documents_%s", k) + flattener.Fields[newKey] = v + delete(flattener.Fields, k) + } + accumulator.AddFields("logstash_plugins", flattener.Fields, pluginTags) + } } return nil diff --git a/plugins/inputs/logstash/logstash_test.go b/plugins/inputs/logstash/logstash_test.go index d8db3475a1e95..089824c58767f 100644 --- a/plugins/inputs/logstash/logstash_test.go +++ b/plugins/inputs/logstash/logstash_test.go @@ -708,6 +708,64 @@ func Test_Logstash7GatherPipelinesQueueStats(test *testing.T) { }, ) + logstash7accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "duration_in_millis": float64(2802177.0), + "in": float64(2665549.0), + "out": float64(2665549.0), + }, + map[string]string{ + "node_id": string("28580380-ad2c-4032-934b-76359125edca"), + "node_name": string("HOST01.local"), + "source": 
string("HOST01.local"), + "node_version": string("7.4.2"), + "pipeline": string("infra"), + "plugin_name": string("elasticsearch"), + "plugin_id": string("38967f09bbd2647a95aa00702b6b557bdbbab31da6a04f991d38abe5629779e3"), + "plugin_type": string("output"), + }, + ) + logstash7accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "bulk_requests_successes": float64(2870), + "bulk_requests_responses_200": float64(2870), + "bulk_requests_failures": float64(262), + "bulk_requests_with_errors": float64(9089), + }, + map[string]string{ + "node_id": string("28580380-ad2c-4032-934b-76359125edca"), + "node_name": string("HOST01.local"), + "source": string("HOST01.local"), + "node_version": string("7.4.2"), + "pipeline": string("infra"), + "plugin_name": string("elasticsearch"), + "plugin_id": string("38967f09bbd2647a95aa00702b6b557bdbbab31da6a04f991d38abe5629779e3"), + "plugin_type": string("output"), + }, + ) + logstash7accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "documents_successes": float64(2665549), + "documents_retryable_failures": float64(13733), + }, + map[string]string{ + "node_id": string("28580380-ad2c-4032-934b-76359125edca"), + "node_name": string("HOST01.local"), + "source": string("HOST01.local"), + "node_version": string("7.4.2"), + "pipeline": string("infra"), + "plugin_name": string("elasticsearch"), + "plugin_id": string("38967f09bbd2647a95aa00702b6b557bdbbab31da6a04f991d38abe5629779e3"), + "plugin_type": string("output"), + }, + ) + logstash7accPipelinesStats.AssertContainsTaggedFields( test, "logstash_queue", diff --git a/plugins/inputs/logstash/samples_logstash7.go b/plugins/inputs/logstash/samples_logstash7.go index fe05712909c81..e04bb4319a27a 100644 --- a/plugins/inputs/logstash/samples_logstash7.go +++ b/plugins/inputs/logstash/samples_logstash7.go @@ -110,10 +110,13 @@ const logstash7PipelinesJSON = ` "successes" : 2870, "responses" : { 
"200" : 2870 - } + }, + "failures": 262, + "with_errors": 9089 }, "documents" : { - "successes" : 2665549 + "successes" : 2665549, + "retryable_failures": 13733 } } ] }, From d441b03b57599257142b7949af8711782209f269 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 16 Sep 2021 15:57:45 -0600 Subject: [PATCH 062/176] fix: bump github.com/shirou/gopsutil (#9760) --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 65a894974bea6..72990ab8394bb 100644 --- a/go.mod +++ b/go.mod @@ -231,7 +231,7 @@ require ( github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e // indirect github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect github.com/sensu/sensu-go/api/core/v2 v2.9.0 - github.com/shirou/gopsutil v3.21.6-0.20210624221800-cb512c850043+incompatible + github.com/shirou/gopsutil v3.21.8+incompatible github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 // indirect github.com/showwin/speedtest-go v1.1.4 github.com/signalfx/com_signalfx_metrics_protobuf v0.0.2 // indirect diff --git a/go.sum b/go.sum index 5ff7799dc902b..79046bf42b4ab 100644 --- a/go.sum +++ b/go.sum @@ -1443,6 +1443,8 @@ github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAm github.com/shirou/gopsutil v2.18.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil v3.21.6-0.20210624221800-cb512c850043+incompatible h1:Rucj22V2P6ktUBqN5auqjyxRHLXqNX6CteXBXifRrgY= github.com/shirou/gopsutil v3.21.6-0.20210624221800-cb512c850043+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/gopsutil v3.21.8+incompatible h1:sh0foI8tMRlCidUJR+KzqWYWxrkuuPIGiO6Vp+KXdCU= +github.com/shirou/gopsutil v3.21.8+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod 
h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 h1:Pm6R878vxWWWR+Sa3ppsLce/Zq+JNTs6aVvRu13jv9A= From 3b20b93a3346a132854f783a148f3c020b375bb9 Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Thu, 16 Sep 2021 16:26:09 -0600 Subject: [PATCH 063/176] go mod tidy --- go.sum | 2 -- 1 file changed, 2 deletions(-) diff --git a/go.sum b/go.sum index 79046bf42b4ab..e85b07043c3f2 100644 --- a/go.sum +++ b/go.sum @@ -1441,8 +1441,6 @@ github.com/sensu/sensu-go/api/core/v2 v2.9.0 h1:NanHMIWbrHP/L4Ge0V1x2+0G9bxFHpvh github.com/sensu/sensu-go/api/core/v2 v2.9.0/go.mod h1:QcgxKxydmScE66hLBTzbFhhiPSR/JHqUjNi/+Lelh6E= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v2.18.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shirou/gopsutil v3.21.6-0.20210624221800-cb512c850043+incompatible h1:Rucj22V2P6ktUBqN5auqjyxRHLXqNX6CteXBXifRrgY= -github.com/shirou/gopsutil v3.21.6-0.20210624221800-cb512c850043+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil v3.21.8+incompatible h1:sh0foI8tMRlCidUJR+KzqWYWxrkuuPIGiO6Vp+KXdCU= github.com/shirou/gopsutil v3.21.8+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= From 8014a508e5fdde3b56936e678a9a72502e219b07 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 17 Sep 2021 16:06:13 -0600 Subject: [PATCH 064/176] fix: bump github.com/aws/smithy-go from 1.3.1 to 1.8.0 (#9770) --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 72990ab8394bb..6f16bb0fb0f83 100644 --- 
a/go.mod +++ b/go.mod @@ -59,7 +59,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/s3 v1.5.0 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.1.5 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.2.2 // indirect - github.com/aws/smithy-go v1.3.1 + github.com/aws/smithy-go v1.8.0 github.com/benbjohnson/clock v1.0.3 github.com/beorn7/perks v1.0.1 // indirect github.com/bitly/go-hostpool v0.1.0 // indirect diff --git a/go.sum b/go.sum index e85b07043c3f2..19b3febf462ff 100644 --- a/go.sum +++ b/go.sum @@ -282,8 +282,9 @@ github.com/aws/aws-sdk-go-v2/service/sso v1.1.5/go.mod h1:bpGz0tidC4y39sZkQSkpO/ github.com/aws/aws-sdk-go-v2/service/sts v1.2.2 h1:fKw6QSGcFlvZCBPYx3fo4sL0HfTmaT06ZtMHJfQQNQQ= github.com/aws/aws-sdk-go-v2/service/sts v1.2.2/go.mod h1:ssRzzJ2RZOVuKj2Vx1YE7ypfil/BIlgmQnCSW4DistU= github.com/aws/smithy-go v1.0.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw= -github.com/aws/smithy-go v1.3.1 h1:xJFO4pK0y9J8fCl34uGsSJX5KNnGbdARDlA5BPhXnwE= github.com/aws/smithy-go v1.3.1/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/aws/smithy-go v1.8.0 h1:AEwwwXQZtUwP5Mz506FeXXrKBe0jA8gVM+1gEcSRooc= +github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= From 38cb624f3729c8bc4d22da3134eb21bde3c7e241 Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Fri, 17 Sep 2021 16:31:42 -0600 Subject: [PATCH 065/176] Update changelog (cherry picked from commit a9924dea7a9bc642120b23db5ef39d757bff9103) --- CHANGELOG.md | 22 +++++++++++++++++++--- etc/telegraf.conf | 9 ++++++++- 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 42ca26772a37b..2ebccd4849220 100644 --- 
a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## v1.20.0-rc0 [2021-09-02] +## v1.20.0 [2021-09-17] #### Release Notes @@ -6,7 +6,7 @@ #### Bugfixes - - [#9700](https://github.com/influxdata/telegraf/pull/9700) Update thrift module to 0.14.2 and zipkin-go-opentracing 0.4.5 + - [#9700](https://github.com/influxdata/telegraf/pull/9700) Update thrift module to 0.14.2 and zipkin-go-opentracing to 0.4.5 - [#9587](https://github.com/influxdata/telegraf/pull/9587) `outputs.opentelemetry` Use headers config in grpc requests - [#9713](https://github.com/influxdata/telegraf/pull/9713) Update runc module to v1.0.0-rc95 to address CVE-2021-30465 - [#9699](https://github.com/influxdata/telegraf/pull/9699) Migrate dgrijalva/jwt-go to golang-jwt/jwt/v4 @@ -16,8 +16,21 @@ - [#9674](https://github.com/influxdata/telegraf/pull/9674) `inputs.mongodb` Change command based on server version - [#9676](https://github.com/influxdata/telegraf/pull/9676) `outputs.dynatrace` Remove hardcoded int value - [#9619](https://github.com/influxdata/telegraf/pull/9619) `outputs.influxdb_v2` Increase accepted retry-after header values. 
- - [#9652](https://github.com/influxdata/telegraf/pull/9652) Update github.com/tinylib/msgp module from 1.1.5 to 1.1.6 + - [#9652](https://github.com/influxdata/telegraf/pull/9652) Update tinylib/msgp module from 1.1.5 to 1.1.6 - [#9471](https://github.com/influxdata/telegraf/pull/9471) `inputs.sql` Make timeout apply to single query + - [#9760](https://github.com/influxdata/telegraf/pull/9760) Update shirou/gopsutil module to 3.21.8 + - [#9707](https://github.com/influxdata/telegraf/pull/9707) `inputs.logstash` Add additional logstash output plugin stats + - [#9656](https://github.com/influxdata/telegraf/pull/9656) Update miekg/dns module from 1.1.31 to 1.1.43 + - [#9750](https://github.com/influxdata/telegraf/pull/9750) Update antchfx/xmlquery module from 1.3.5 to 1.3.6 + - [#9757](https://github.com/influxdata/telegraf/pull/9757) `parsers.registry.go` Fix panic for non-existing metric names + - [#9677](https://github.com/influxdata/telegraf/pull/9677) Update Azure/azure-event-hubs-go/v3 module from 3.2.0 to 3.3.13 + - [#9653](https://github.com/influxdata/telegraf/pull/9653) Update prometheus/client_golang module from 1.7.1 to 1.11.0 + - [#9693](https://github.com/influxdata/telegraf/pull/9693) `inputs.cloudwatch` Fix pagination error + - [#9727](https://github.com/influxdata/telegraf/pull/9727) `outputs.http` Add error message logging + - [#9718](https://github.com/influxdata/telegraf/pull/9718) Update influxdata/influxdb-observability module from 0.2.4 to 0.2.7 + - [#9560](https://github.com/influxdata/telegraf/pull/9560) Update gopcua/opcua module + - [#9544](https://github.com/influxdata/telegraf/pull/9544) `inputs.couchbase` Fix memory leak + - [#9588](https://github.com/influxdata/telegraf/pull/9588) `outputs.opentelemetry` Use attributes setting #### Features @@ -38,12 +51,15 @@ - [#9343](https://github.com/influxdata/telegraf/pull/9343) `inputs.snmp_trap` Improve MIB lookup performance - [#9342](https://github.com/influxdata/telegraf/pull/9342) 
`outputs.newrelic` Add option to override metric_url - [#9306](https://github.com/influxdata/telegraf/pull/9306) `inputs.smart` Add power mode status + - [#9762](https://github.com/influxdata/telegraf/pull/9762) `inputs.bond` Add count of bonded slaves (for easier alerting) + - [#9675](https://github.com/influxdata/telegraf/pull/9675) `outputs.dynatrace` Remove special handling from counters and update dynatrace-oss/dynatrace-metric-utils-go module to 0.3.0 #### New Input Plugins - [#9602](https://github.com/influxdata/telegraf/pull/9602) Add rocm_smi input to monitor AMD GPUs - [#9101](https://github.com/influxdata/telegraf/pull/9101) Add mdstat input to gather from /proc/mdstat collection - [#3536](https://github.com/influxdata/telegraf/pull/3536) Add Elasticsearch query input + - [#9623](https://github.com/influxdata/telegraf/pull/9623) Add internet Speed Monitor Input Plugin #### New Output Plugins diff --git a/etc/telegraf.conf b/etc/telegraf.conf index fabd2616141fb..beb22821464d9 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -592,7 +592,7 @@ # ## Connection timeout, defaults to "5s" if not set. # timeout = "5s" # -# ## If you want to convert values represented as gauges to counters, add the metric names here +# ## If you want metrics to be treated and reported as delta counters, add the metric names here # additional_counters = [ ] # # ## Optional dimensions to be added to every metric @@ -4324,6 +4324,13 @@ # # collect_memstats = true +# # Monitors internet speed using speedtest.net service +# [[inputs.internet_speed]] +# ## Sets if runs file download test +# ## Default: false +# enable_file_download = false + + # # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs. # [[inputs.interrupts]] # ## When set to true, cpu metrics are tagged with the cpu. 
Otherwise cpu is From 9ecf6040afd97488365739c423ffc5ed83a15479 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Mon, 20 Sep 2021 08:13:39 -0700 Subject: [PATCH 066/176] docs: document telegraf commands and flags (#9635) --- docs/COMMANDS_AND_FLAGS.md | 67 ++++++++++++++++++++++++++++++++++++++ docs/CONFIGURATION.md | 3 ++ 2 files changed, 70 insertions(+) create mode 100644 docs/COMMANDS_AND_FLAGS.md diff --git a/docs/COMMANDS_AND_FLAGS.md b/docs/COMMANDS_AND_FLAGS.md new file mode 100644 index 0000000000000..cb0c31268c9a4 --- /dev/null +++ b/docs/COMMANDS_AND_FLAGS.md @@ -0,0 +1,67 @@ +# Telegraf Commands & Flags + +### Usage + +``` +telegraf [commands] +telegraf [flags] +``` + +### Commands + +|command|description| +|--------|-----------------------------------------------| +|`config` |print out full sample configuration to stdout| +|`version`|print the version to stdout| + +### Flags + +|flag|description| +|-------------------|------------| +|`--aggregator-filter ` |filter the aggregators to enable, separator is `:`| +|`--config ` |configuration file to load| +|`--config-directory ` |directory containing additional *.conf files| +|`--watch-config` |Telegraf will restart on local config changes.
Monitor changes using either fs notifications or polling. Valid values: `inotify` or `poll`.
Monitoring is off by default.| +|`--plugin-directory` |directory containing *.so files, this directory will be searched recursively. Any Plugin found will be loaded and namespaced.| +|`--debug` |turn on debug logging| +|`--input-filter ` |filter the inputs to enable, separator is `:`| +|`--input-list` |print available input plugins.| +|`--output-filter ` |filter the outputs to enable, separator is `:`| +|`--output-list` |print available output plugins.| +|`--pidfile ` |file to write our pid to| +|`--pprof-addr
` |pprof address to listen on, don't activate pprof if empty| +|`--processor-filter ` |filter the processors to enable, separator is `:`| +|`--quiet` |run in quiet mode| +|`--section-filter` |filter config sections to output, separator is `:`
Valid values are `agent`, `global_tags`, `outputs`, `processors`, `aggregators` and `inputs`| +|`--sample-config` |print out full sample configuration| +|`--once` |enable once mode: gather metrics once, write them, and exit| +|`--test` |enable test mode: gather metrics once and print them| +|`--test-wait` |wait up to this many seconds for service inputs to complete in test or once mode| +|`--usage ` |print usage for a plugin, ie, `telegraf --usage mysql`| +|`--version` |display the version and exit| + +### Examples + +**Generate a telegraf config file:** + +`telegraf config > telegraf.conf` + +**Generate config with only cpu input & influxdb output plugins defined:** + +`telegraf --input-filter cpu --output-filter influxdb config` + +**Run a single telegraf collection, outputting metrics to stdout:** + +`telegraf --config telegraf.conf --test` + +**Run telegraf with all plugins defined in config file:** + +`telegraf --config telegraf.conf` + +**Run telegraf, enabling the cpu & memory input, and influxdb output plugins:** + +`telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb` + +**Run telegraf with pprof:** + +`telegraf --config telegraf.conf --pprof-addr localhost:6060` diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 70e7981c9450b..9af88b669ea9f 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -19,6 +19,8 @@ To generate a file with specific inputs and outputs, you can use the telegraf --input-filter cpu:mem:net:swap --output-filter influxdb:kafka config ``` +[View the full list][flags] of Telegraf commands and flags or by running `telegraf --help`. + ### Configuration Loading The location of the configuration file can be set via the `--config` command @@ -671,3 +673,4 @@ Reference the detailed [TLS][] documentation. 
[telegraf.conf]: /etc/telegraf.conf [TLS]: /docs/TLS.md [glob pattern]: https://github.com/gobwas/glob#syntax +[flags]: /docs/COMMANDS_AND_FLAGS.md From b93f20068a9b35905137517bb93448bdc5351539 Mon Sep 17 00:00:00 2001 From: Sean Molenaar Date: Mon, 20 Sep 2021 19:10:36 +0300 Subject: [PATCH 067/176] docs: fix jenkins plugin documentation (#9714) --- plugins/inputs/jenkins/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/jenkins/README.md b/plugins/inputs/jenkins/README.md index 4d82f4e90ba31..e12326031b9ef 100644 --- a/plugins/inputs/jenkins/README.md +++ b/plugins/inputs/jenkins/README.md @@ -57,7 +57,7 @@ This plugin does not require a plugin on jenkins and it makes use of Jenkins API ### Metrics: -- jenkins_node +- jenkins - tags: - source - port From 82bdbce498ad2b1f558145fa9f4cc7cac2bbf1c4 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Mon, 20 Sep 2021 10:26:10 -0700 Subject: [PATCH 068/176] chore: "makefile help" output, but still support building based on arch for CI (#9579) --- .circleci/config.yml | 16 +- Makefile | 355 ++++++++++++++++------------------- docs/developers/PACKAGING.md | 25 ++- 3 files changed, 179 insertions(+), 217 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index b2043e1fa291c..1f644a7b9d20b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -123,11 +123,11 @@ commands: - when: condition: << parameters.release >> steps: - - run: 'mips=1 mipsel=1 arm64=1 amd64=1 static=1 armel=1 armhf=1 s390x=1 ppc641e=1 i386=1 windows=1 darwin=1 make package' + - run: 'make package' - when: condition: << parameters.nightly >> steps: - - run: 'mips=1 mipsel=1 arm64=1 amd64=1 static=1 armel=1 armhf=1 s390x=1 ppc641e=1 i386=1 windows=1 darwin=1 NIGHTLY=1 make package' + - run: 'make package' - run: 'make upload-nightly' - unless: condition: @@ -135,7 +135,7 @@ commands: - << parameters.nightly >> - << parameters.release >> 
steps: - - run: '<< parameters.type >>=1 make package' + - run: 'make package include_packages="$(make << parameters.type >>)"' - store_artifacts: path: './build/dist' destination: 'build/dist' @@ -215,11 +215,11 @@ jobs: steps: - package-build: type: i386 - ppc641e-package: - executor: go-1_17 + ppc64le-package: + executor: go-1_16 steps: - package-build: - type: ppc641e + type: ppc64le s390x-package: executor: go-1_17 steps: @@ -391,7 +391,7 @@ workflows: - 'i386-package': requires: - 'test-awaiter' - - 'ppc641e-package': + - 'ppc64le-package': requires: - 'test-awaiter' - 's390x-package': @@ -421,7 +421,7 @@ workflows: - 'share-artifacts': requires: - 'i386-package' - - 'ppc641e-package' + - 'ppc64le-package' - 's390x-package' - 'armel-package' - 'amd64-package' diff --git a/Makefile b/Makefile index 230eedf600f6f..a7797a0e8ce5f 100644 --- a/Makefile +++ b/Makefile @@ -80,9 +80,18 @@ help: @echo ' lint-install - install linter' @echo ' check-deps - check docs/LICENSE_OF_DEPENDENCIES.md' @echo ' clean - delete build artifacts' + @echo ' package - build all supported packages, override include_packages to only build a subset' + @echo ' e.g.: make package include_packages="amd64.deb"' @echo '' - @echo 'Package Targets:' - @$(foreach dist,$(dists),echo " $(dist)";) + @echo 'Possible values for include_packages variable' + @$(foreach package,$(include_packages),echo " $(package)";) + @echo '' + @echo 'Resulting package name format (where arch will be the arch of the package):' + @echo ' telegraf_$(deb_version)_arch.deb' + @echo ' telegraf-$(rpm_version).arch.rpm' + @echo ' telegraf-$(tar_version)_arch.tar.gz' + @echo ' telegraf-$(tar_version)_arch.zip' + .PHONY: deps deps: @@ -224,164 +233,118 @@ $(buildbin): @mkdir -pv $(dir $@) go build -o $(dir $@) -ldflags "$(LDFLAGS)" ./cmd/telegraf -ifdef mips -debs += telegraf_$(deb_version)_mips.deb -tars += telegraf-$(tar_version)_linux_mips.tar.gz -endif - -ifdef mipsel -debs += telegraf_$(deb_version)_mipsel.deb -tars += 
telegraf-$(tar_version)_linux_mipsel.tar.gz -endif - -ifdef arm64 -tars += telegraf-$(tar_version)_linux_arm64.tar.gz -debs += telegraf_$(deb_version)_arm64.deb -rpms += telegraf-$(rpm_version).aarch64.rpm -endif - -ifdef amd64 -tars += telegraf-$(tar_version)_freebsd_amd64.tar.gz -tars += telegraf-$(tar_version)_linux_amd64.tar.gz -debs += telegraf_$(deb_version)_amd64.deb -rpms += telegraf-$(rpm_version).x86_64.rpm -endif - -ifdef static -tars += telegraf-$(tar_version)_static_linux_amd64.tar.gz -endif - -ifdef armel -tars += telegraf-$(tar_version)_linux_armel.tar.gz -rpms += telegraf-$(rpm_version).armel.rpm -debs += telegraf_$(deb_version)_armel.deb -endif - -ifdef armhf -tars += telegraf-$(tar_version)_linux_armhf.tar.gz -tars += telegraf-$(tar_version)_freebsd_armv7.tar.gz -debs += telegraf_$(deb_version)_armhf.deb -rpms += telegraf-$(rpm_version).armv6hl.rpm -endif - -ifdef s390x -tars += telegraf-$(tar_version)_linux_s390x.tar.gz -debs += telegraf_$(deb_version)_s390x.deb -rpms += telegraf-$(rpm_version).s390x.rpm -endif - -ifdef ppc641e -tars += telegraf-$(tar_version)_linux_ppc64le.tar.gz -rpms += telegraf-$(rpm_version).ppc64le.rpm -debs += telegraf_$(deb_version)_ppc64el.deb -endif - -ifdef i386 -tars += telegraf-$(tar_version)_freebsd_i386.tar.gz -debs += telegraf_$(deb_version)_i386.deb -tars += telegraf-$(tar_version)_linux_i386.tar.gz -rpms += telegraf-$(rpm_version).i386.rpm -endif - -ifdef windows -zips += telegraf-$(tar_version)_windows_i386.zip -zips += telegraf-$(tar_version)_windows_amd64.zip -endif - -ifdef darwin -tars += telegraf-$(tar_version)_darwin_amd64.tar.gz -endif - -dists := $(debs) $(rpms) $(tars) $(zips) +# Define packages Telegraf supports, organized by architecture with a rule to echo the list to limit include_packages +# e.g. 
make package include_packages="$(make amd64)" +mips += linux_mips.tar.gz mips.deb +.PHONY: mips +mips: + @ echo $(mips) +mipsel += mipsel.deb linux_mipsel.tar.gz +.PHONY: mipsel +mipsel: + @ echo $(mipsel) +arm64 += linux_arm64.tar.gz arm64.deb aarch64.rpm +.PHONY: arm64 +arm64: + @ echo $(arm64) +amd64 += freebsd_amd64.tar.gz linux_amd64.tar.gz amd64.deb x86_64.rpm +.PHONY: amd64 +amd64: + @ echo $(amd64) +static += static_linux_amd64.tar.gz +.PHONY: static +static: + @ echo $(static) +armel += linux_armel.tar.gz armel.rpm armel.deb +.PHONY: armel +armel: + @ echo $(armel) +armhf += linux_armhf.tar.gz freebsd_armv7.tar.gz armhf.deb armv6hl.rpm +.PHONY: armhf +armhf: + @ echo $(armhf) +s390x += linux_s390x.tar.gz s390x.deb s390x.rpm +.PHONY: s390x +s390x: + @ echo $(s390x) +ppc64le += linux_ppc64le.tar.gz ppc64le.rpm ppc64el.deb +.PHONY: ppc64le +ppc64le: + @ echo $(ppc64le) +i386 += freebsd_i386.tar.gz i386.deb linux_i386.tar.gz i386.rpm +.PHONY: i386 +i386: + @ echo $(i386) +windows += windows_i386.zip windows_amd64.zip +.PHONY: windows +windows: + @ echo $(windows) +darwin += darwin_amd64.tar.gz +.PHONY: darwin +darwin: + @ echo $(darwin) + +include_packages := $(mips) $(mipsel) $(arm64) $(amd64) $(static) $(armel) $(armhf) $(s390x) $(ppc64le) $(i386) $(windows) $(darwin) .PHONY: package -package: $(dists) - -rpm_amd64 := amd64 -rpm_386 := i386 -rpm_s390x := s390x -rpm_ppc64le := ppc64le -rpm_arm5 := armel -rpm_arm6 := armv6hl -rpm_arm647 := aarch64 -rpm_arch = $(rpm_$(GOARCH)$(GOARM)) - -.PHONY: $(rpms) -$(rpms): - @$(MAKE) install - @mkdir -p $(pkgdir) - fpm --force \ - --log info \ - --architecture $(rpm_arch) \ - --input-type dir \ - --output-type rpm \ - --vendor InfluxData \ - --url https://github.com/influxdata/telegraf \ - --license MIT \ - --maintainer support@influxdb.com \ - --config-files /etc/telegraf/telegraf.conf \ - --config-files /etc/logrotate.d/telegraf \ - --after-install scripts/rpm/post-install.sh \ - --before-install 
scripts/rpm/pre-install.sh \ - --after-remove scripts/rpm/post-remove.sh \ - --description "Plugin-driven server agent for reporting metrics into InfluxDB." \ - --depends coreutils \ - --depends shadow-utils \ - --rpm-posttrans scripts/rpm/post-install.sh \ - --name telegraf \ - --version $(version) \ - --iteration $(rpm_iteration) \ - --chdir $(DESTDIR) \ - --package $(pkgdir)/$@ - -deb_amd64 := amd64 -deb_386 := i386 -deb_s390x := s390x -deb_ppc64le := ppc64el -deb_arm5 := armel -deb_arm6 := armhf -deb_arm647 := arm64 -deb_mips := mips -deb_mipsle := mipsel -deb_arch = $(deb_$(GOARCH)$(GOARM)) - -.PHONY: $(debs) -$(debs): - @$(MAKE) install - @mkdir -pv $(pkgdir) - fpm --force \ - --log info \ - --architecture $(deb_arch) \ - --input-type dir \ - --output-type deb \ - --vendor InfluxData \ - --url https://github.com/influxdata/telegraf \ - --license MIT \ - --maintainer support@influxdb.com \ - --config-files /etc/telegraf/telegraf.conf.sample \ - --config-files /etc/logrotate.d/telegraf \ - --after-install scripts/deb/post-install.sh \ - --before-install scripts/deb/pre-install.sh \ - --after-remove scripts/deb/post-remove.sh \ - --before-remove scripts/deb/pre-remove.sh \ - --description "Plugin-driven server agent for reporting metrics into InfluxDB." \ - --name telegraf \ - --version $(version) \ - --iteration $(deb_iteration) \ - --chdir $(DESTDIR) \ - --package $(pkgdir)/$@ - -.PHONY: $(zips) -$(zips): - @$(MAKE) install - @mkdir -p $(pkgdir) - (cd $(dir $(DESTDIR)) && zip -r - ./*) > $(pkgdir)/$@ +package: $(include_packages) -.PHONY: $(tars) -$(tars): +.PHONY: $(include_packages) +$(include_packages): @$(MAKE) install @mkdir -p $(pkgdir) - tar --owner 0 --group 0 -czvf $(pkgdir)/$@ -C $(dir $(DESTDIR)) . 
+ + @if [ "$(suffix $@)" = ".rpm" ]; then \ + fpm --force \ + --log info \ + --architecture $(basename $@) \ + --input-type dir \ + --output-type rpm \ + --vendor InfluxData \ + --url https://github.com/influxdata/telegraf \ + --license MIT \ + --maintainer support@influxdb.com \ + --config-files /etc/telegraf/telegraf.conf \ + --config-files /etc/logrotate.d/telegraf \ + --after-install scripts/rpm/post-install.sh \ + --before-install scripts/rpm/pre-install.sh \ + --after-remove scripts/rpm/post-remove.sh \ + --description "Plugin-driven server agent for reporting metrics into InfluxDB." \ + --depends coreutils \ + --depends shadow-utils \ + --rpm-posttrans scripts/rpm/post-install.sh \ + --name telegraf \ + --version $(version) \ + --iteration $(rpm_iteration) \ + --chdir $(DESTDIR) \ + --package $(pkgdir)/telegraf-$(rpm_version).$@ ;\ + elif [ "$(suffix $@)" = ".deb" ]; then \ + fpm --force \ + --log info \ + --architecture $(basename $@) \ + --input-type dir \ + --output-type deb \ + --vendor InfluxData \ + --url https://github.com/influxdata/telegraf \ + --license MIT \ + --maintainer support@influxdb.com \ + --config-files /etc/telegraf/telegraf.conf.sample \ + --config-files /etc/logrotate.d/telegraf \ + --after-install scripts/deb/post-install.sh \ + --before-install scripts/deb/pre-install.sh \ + --after-remove scripts/deb/post-remove.sh \ + --before-remove scripts/deb/pre-remove.sh \ + --description "Plugin-driven server agent for reporting metrics into InfluxDB." \ + --name telegraf \ + --version $(version) \ + --iteration $(deb_iteration) \ + --chdir $(DESTDIR) \ + --package $(pkgdir)/telegraf_$(deb_version)_$@ ;\ + elif [ "$(suffix $@)" = ".zip" ]; then \ + (cd $(dir $(DESTDIR)) && zip -r - ./*) > $(pkgdir)/telegraf-$(tar_version)_$@ ;\ + elif [ "$(suffix $@)" = ".gz" ]; then \ + tar --owner 0 --group 0 -czvf $(pkgdir)/telegraf-$(tar_version)_$@ -C $(dir $(DESTDIR)) . 
;\ + fi .PHONY: upload-nightly upload-nightly: @@ -393,63 +356,63 @@ upload-nightly: --include "*.zip" \ --acl public-read -%amd64.deb %x86_64.rpm %linux_amd64.tar.gz: export GOOS := linux -%amd64.deb %x86_64.rpm %linux_amd64.tar.gz: export GOARCH := amd64 +amd64.deb x86_64.rpm linux_amd64.tar.gz: export GOOS := linux +amd64.deb x86_64.rpm linux_amd64.tar.gz: export GOARCH := amd64 -%static_linux_amd64.tar.gz: export cgo := -nocgo -%static_linux_amd64.tar.gz: export CGO_ENABLED := 0 +static_linux_amd64.tar.gz: export cgo := -nocgo +static_linux_amd64.tar.gz: export CGO_ENABLED := 0 -%i386.deb %i386.rpm %linux_i386.tar.gz: export GOOS := linux -%i386.deb %i386.rpm %linux_i386.tar.gz: export GOARCH := 386 +i386.deb i386.rpm linux_i386.tar.gz: export GOOS := linux +i386.deb i386.rpm linux_i386.tar.gz: export GOARCH := 386 -%armel.deb %armel.rpm %linux_armel.tar.gz: export GOOS := linux -%armel.deb %armel.rpm %linux_armel.tar.gz: export GOARCH := arm -%armel.deb %armel.rpm %linux_armel.tar.gz: export GOARM := 5 +armel.deb armel.rpm linux_armel.tar.gz: export GOOS := linux +armel.deb armel.rpm linux_armel.tar.gz: export GOARCH := arm +armel.deb armel.rpm linux_armel.tar.gz: export GOARM := 5 -%armhf.deb %armv6hl.rpm %linux_armhf.tar.gz: export GOOS := linux -%armhf.deb %armv6hl.rpm %linux_armhf.tar.gz: export GOARCH := arm -%armhf.deb %armv6hl.rpm %linux_armhf.tar.gz: export GOARM := 6 +armhf.deb armv6hl.rpm linux_armhf.tar.gz: export GOOS := linux +armhf.deb armv6hl.rpm linux_armhf.tar.gz: export GOARCH := arm +armhf.deb armv6hl.rpm linux_armhf.tar.gz: export GOARM := 6 -%arm64.deb %aarch64.rpm %linux_arm64.tar.gz: export GOOS := linux -%arm64.deb %aarch64.rpm %linux_arm64.tar.gz: export GOARCH := arm64 -%arm64.deb %aarch64.rpm %linux_arm64.tar.gz: export GOARM := 7 +arm64.deb aarch64.rpm linux_arm64.tar.gz: export GOOS := linux +arm64.deb aarch64.rpm linux_arm64.tar.gz: export GOARCH := arm64 +arm64.deb aarch64.rpm linux_arm64.tar.gz: export GOARM := 7 -%mips.deb 
%linux_mips.tar.gz: export GOOS := linux -%mips.deb %linux_mips.tar.gz: export GOARCH := mips +mips.deb linux_mips.tar.gz: export GOOS := linux +mips.deb linux_mips.tar.gz: export GOARCH := mips -%mipsel.deb %linux_mipsel.tar.gz: export GOOS := linux -%mipsel.deb %linux_mipsel.tar.gz: export GOARCH := mipsle +mipsel.deb linux_mipsel.tar.gz: export GOOS := linux +mipsel.deb linux_mipsel.tar.gz: export GOARCH := mipsle -%s390x.deb %s390x.rpm %linux_s390x.tar.gz: export GOOS := linux -%s390x.deb %s390x.rpm %linux_s390x.tar.gz: export GOARCH := s390x +s390x.deb s390x.rpm linux_s390x.tar.gz: export GOOS := linux +s390x.deb s390x.rpm linux_s390x.tar.gz: export GOARCH := s390x -%ppc64el.deb %ppc64le.rpm %linux_ppc64le.tar.gz: export GOOS := linux -%ppc64el.deb %ppc64le.rpm %linux_ppc64le.tar.gz: export GOARCH := ppc64le +ppc64el.deb ppc64le.rpm linux_ppc64le.tar.gz: export GOOS := linux +ppc64el.deb ppc64le.rpm linux_ppc64le.tar.gz: export GOARCH := ppc64le -%freebsd_amd64.tar.gz: export GOOS := freebsd -%freebsd_amd64.tar.gz: export GOARCH := amd64 +freebsd_amd64.tar.gz: export GOOS := freebsd +freebsd_amd64.tar.gz: export GOARCH := amd64 -%freebsd_i386.tar.gz: export GOOS := freebsd -%freebsd_i386.tar.gz: export GOARCH := 386 +freebsd_i386.tar.gz: export GOOS := freebsd +freebsd_i386.tar.gz: export GOARCH := 386 -%freebsd_armv7.tar.gz: export GOOS := freebsd -%freebsd_armv7.tar.gz: export GOARCH := arm -%freebsd_armv7.tar.gz: export GOARM := 7 +freebsd_armv7.tar.gz: export GOOS := freebsd +freebsd_armv7.tar.gz: export GOARCH := arm +freebsd_armv7.tar.gz: export GOARM := 7 -%windows_amd64.zip: export GOOS := windows -%windows_amd64.zip: export GOARCH := amd64 +windows_amd64.zip: export GOOS := windows +windows_amd64.zip: export GOARCH := amd64 -%darwin_amd64.tar.gz: export GOOS := darwin -%darwin_amd64.tar.gz: export GOARCH := amd64 +darwin_amd64.tar.gz: export GOOS := darwin +darwin_amd64.tar.gz: export GOARCH := amd64 -%windows_i386.zip: export GOOS := windows 
-%windows_i386.zip: export GOARCH := 386 +windows_i386.zip: export GOOS := windows +windows_i386.zip: export GOARCH := 386 -%windows_i386.zip %windows_amd64.zip: export prefix = -%windows_i386.zip %windows_amd64.zip: export bindir = $(prefix) -%windows_i386.zip %windows_amd64.zip: export sysconfdir = $(prefix) -%windows_i386.zip %windows_amd64.zip: export localstatedir = $(prefix) -%windows_i386.zip %windows_amd64.zip: export EXEEXT := .exe +windows_i386.zip windows_amd64.zip: export prefix = +windows_i386.zip windows_amd64.zip: export bindir = $(prefix) +windows_i386.zip windows_amd64.zip: export sysconfdir = $(prefix) +windows_i386.zip windows_amd64.zip: export localstatedir = $(prefix) +windows_i386.zip windows_amd64.zip: export EXEEXT := .exe %.deb: export pkg := deb %.deb: export prefix := /usr diff --git a/docs/developers/PACKAGING.md b/docs/developers/PACKAGING.md index f9708fb7164d0..cbdb61b05af01 100644 --- a/docs/developers/PACKAGING.md +++ b/docs/developers/PACKAGING.md @@ -1,5 +1,9 @@ # Packaging +Building the packages for Telegraf is automated using [Make](https://en.wikipedia.org/wiki/Make_(software)). Just running `make` will build a Telegraf binary for the operating system and architecture you are using (if it is supported). If you need to build a different package then you can run `make package` which will build all the supported packages. You will most likely only want a subset, you can define a subset of packages to be built by overriding the `include_packages` variable like so `make package include_packages="amd64.deb"`. You can also build all packages for a specific architecture like so `make package include_packages="$(make amd64)"`. + +The packaging steps require certain tools to be setup before hand to work. These dependencies are listed in the ci-1.16.docker file which you can find in the scripts directory. Therefore it is recommended to use Docker to build the artifacts, see more details below. 
+ ## Package using Docker This packaging method uses the CI images, and is very similar to how the @@ -18,20 +22,15 @@ docker run -ti quay.io/influxdb/telegraf-ci:1.9.7 /bin/bash ``` From within the container: -``` -go get -d github.com/influxdata/telegraf -cd /go/src/github.com/influxdata/telegraf - -# Use tag of Telegraf version you would like to build -git checkout release-1.10 -git reset --hard 1.10.2 -make deps -# To build packages run: - -``` -make package amd64=1 -``` +1. `go get -d github.com/influxdata/telegraf` +2. `cd /go/src/github.com/influxdata/telegraf` +3. `git checkout release-1.10` + * Replace tag `release-1.10` with the version of Telegraf you would like to build +4. `git reset --hard 1.10.2` +5. `make deps` +6. `make package include_packages="amd64.deb"` + * Change `include_packages` to change what package you want, run `make help` to see possible values From the host system, copy the build artifacts out of the container: ``` From 58d4e9a851c293608ede43b47dda8f8b347979dc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Sep 2021 15:34:39 -0600 Subject: [PATCH 069/176] fix: bump cloud.google.com/go/pubsub from 1.15.0 to 1.17.0 (#9769) --- go.mod | 9 ++++----- go.sum | 18 ++++++++++++------ 2 files changed, 16 insertions(+), 11 deletions(-) diff --git a/go.mod b/go.mod index 6f16bb0fb0f83..d1d2dd094d749 100644 --- a/go.mod +++ b/go.mod @@ -3,9 +3,10 @@ module github.com/influxdata/telegraf go 1.17 require ( - cloud.google.com/go v0.90.0 + cloud.google.com/go v0.93.3 // indirect cloud.google.com/go/bigquery v1.8.0 - cloud.google.com/go/pubsub v1.15.0 + cloud.google.com/go/monitoring v0.2.0 + cloud.google.com/go/pubsub v1.17.0 code.cloudfoundry.org/clock v1.0.0 // indirect collectd.org v0.5.0 github.com/Azure/azure-amqp-common-go/v3 v3.0.1 // indirect @@ -161,7 +162,6 @@ require ( github.com/jmespath/go-jmespath v0.4.0 github.com/jpillora/backoff v1.0.0 // indirect 
github.com/json-iterator/go v1.1.11 // indirect - github.com/jstemmer/go-junit-report v0.9.1 // indirect github.com/kardianos/service v1.0.0 github.com/karrick/godirwalk v1.16.1 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 @@ -275,7 +275,6 @@ require ( go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e // indirect - golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect golang.org/x/mod v0.4.2 // indirect golang.org/x/net v0.0.0-20210610132358-84b48f89b13b golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a @@ -290,7 +289,7 @@ require ( golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 google.golang.org/api v0.54.0 google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20210813162853-db860fec028c + google.golang.org/genproto v0.0.0-20210827211047-25e5f791fe06 google.golang.org/grpc v1.40.0 google.golang.org/protobuf v1.27.1 gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect diff --git a/go.sum b/go.sum index 19b3febf462ff..9a8b98cea97ad 100644 --- a/go.sum +++ b/go.sum @@ -24,8 +24,10 @@ cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0 h1:MjvSkUq8RuAb+2JLDi5VQmmExRJPUQ3JLCWpRB6fmdw= cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.92.2/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.93.3 h1:wPBktZFzYBcCZVARvwVKqH1uEj+aLXofJEtrb4oOsio= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= cloud.google.com/go/bigquery v1.0.1/go.mod 
h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -36,12 +38,16 @@ cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM7 cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/kms v0.1.0 h1:VXAb5OzejDcyhFzIDeZ5n5AUdlsFnCyexuascIwWMj0= +cloud.google.com/go/kms v0.1.0/go.mod h1:8Qp8PCAypHg4FdmlyW1QRAv09BGQ9Uzh7JnmIZxPk+c= +cloud.google.com/go/monitoring v0.2.0 h1:UFQB1+YbZjAOqAFFY4RlNiOrt19O5HzPeCdtYSlPvmk= +cloud.google.com/go/monitoring v0.2.0/go.mod h1:K/JoZWY3xszHf38AMkzZGx1n5eT1/57ilElGMpESsEE= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.15.0 h1:6KI/wDVYLtNvzIPJ8ObuJcq5bBtAWQ6Suo8osHPvYn4= -cloud.google.com/go/pubsub v1.15.0/go.mod h1:DnEUPGZlp+N9MElp/6uVqCKiknQixvVLcrgrqT62O6A= +cloud.google.com/go/pubsub v1.17.0 h1:uGzqGUGvaSJ3APz5BmLFw1LpSTnB9o+EzE5fI3rBbJI= +cloud.google.com/go/pubsub v1.17.0/go.mod h1:bBIeYx9ftf/hr7eoSUim6cRaOYZE/hHuigwdwLLByi8= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= @@ -1032,7 +1038,6 @@ github.com/json-iterator/go v1.1.10/go.mod 
h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/ github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= @@ -1716,7 +1721,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= @@ -2174,8 +2178,10 @@ google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod 
h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c h1:iLQakcwWG3k/++1q/46apVb1sUQ3IqIdn9yUE6eh/xA= google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210824181836-a4879c3d0e89/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210827211047-25e5f791fe06 h1:Ogdiaj9EMVKYHnDsESxwlTr/k5eqCdwoQVJEcdg0NbE= +google.golang.org/genproto v0.0.0-20210827211047-25e5f791fe06/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= From 7eb6e88c859ea30a783f9bf1146a3d3c574113f4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Sep 2021 15:53:39 -0600 Subject: [PATCH 070/176] fix: bump github.com/Azure/go-autorest/autorest/azure/auth from 0.5.6 to 0.5.8 (#9678) --- go.mod | 2 +- go.sum | 8 +++----- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index d1d2dd094d749..d56ac810d6026 100644 --- a/go.mod +++ b/go.mod @@ -21,7 +21,7 @@ require ( github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest v0.11.18 github.com/Azure/go-autorest/autorest/adal v0.9.15 - github.com/Azure/go-autorest/autorest/azure/auth v0.5.6 + github.com/Azure/go-autorest/autorest/azure/auth v0.5.8 github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect diff --git a/go.sum b/go.sum index 9a8b98cea97ad..c67071b110a16 100644 --- a/go.sum +++ b/go.sum @@ -107,13 +107,13 
@@ github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMl github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/adal v0.9.10/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/adal v0.9.11/go.mod h1:nBKAnTomx8gDtl+3ZCJv2v0KACFHWTB2drffI1B68Pk= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/adal v0.9.15 h1:X+p2GF0GWyOiSmqohIaEeuNFNDY4I4EOlVuUQvFdWMk= github.com/Azure/go-autorest/autorest/adal v0.9.15/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.6 h1:cgiBtUxatlt/e3qY6fQJioqbocWHr5osz259MomF5M0= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.6/go.mod h1:nYlP+G+n8MhD5CjIi6W8nFTIJn/PnTHes5nUbK6BxD0= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.8 h1:TzPg6B6fTZ0G1zBf3T54aI7p3cAT6u//TOXGPmFMOXg= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.8/go.mod h1:kxyKZTSfKh8OVFWPAgOgQ/frrJgeYQJPyR5fLFmXko4= github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 h1:dMOmEJfkLKW/7JsokJqkyoYSgmR08hi9KrhjZb+JALY= github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM= @@ -1687,7 +1687,6 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= @@ -1957,7 +1956,6 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069 h1:siQdpVirKtzPhKl3lZWozZraCFObP8S1v6PRp0bLrtU= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= From c3bdb7e8dd8a4bbc4080c64601f17353f755210f Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Mon, 20 Sep 2021 17:24:40 -0600 Subject: [PATCH 071/176] Update build version to 1.21.0 --- build_version.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build_version.txt b/build_version.txt index 3989355915568..3500250a4b05b 100644 --- a/build_version.txt +++ b/build_version.txt @@ -1 +1 @@ -1.20.0 +1.21.0 From 
8133fd83a8177866adff2028160bcf27e186464c Mon Sep 17 00:00:00 2001 From: Alan Pope Date: Tue, 21 Sep 2021 12:02:13 +0100 Subject: [PATCH 072/176] Reduce README size/complexity --- README.md | 367 +++--------------------------------------------------- 1 file changed, 19 insertions(+), 348 deletions(-) diff --git a/README.md b/README.md index 2b49842789db6..5f0861f4fa3cb 100644 --- a/README.md +++ b/README.md @@ -8,9 +8,8 @@ Telegraf is an agent for collecting, processing, aggregating, and writing metrics. -Design goals are to have a minimal memory footprint with a plugin system so -that developers in the community can easily add support for collecting -metrics. +Design goal: +- Have a minimal memory footprint with a plugin system so that developers in the community can easily add support for collecting metrics. Telegraf is plugin-driven and has the concept of 4 distinct plugin types: @@ -19,25 +18,9 @@ Telegraf is plugin-driven and has the concept of 4 distinct plugin types: 3. [Aggregator Plugins](#aggregator-plugins) create aggregate metrics (e.g. mean, min, max, quantiles, etc.) 4. [Output Plugins](#output-plugins) write metrics to various destinations -New plugins are designed to be easy to contribute, pull requests are welcomed -and we work to incorporate as many pull requests as possible. -If none of the internal plugins fit your needs, you could have a look at the +New plugins are designed to be easy to contribute, pull requests are welcomed and we work to incorporate as many pull requests as possible. If none of the internal plugins fit your needs, you could have a look at the [list of external plugins](EXTERNAL_PLUGINS.md). -## Try in Browser :rocket: - -You can try Telegraf right in your browser in the [Telegraf playground](https://rootnroll.com/d/telegraf/). 
- -## Contributing - -There are many ways to contribute: -- Fix and [report bugs](https://github.com/influxdata/telegraf/issues/new) -- [Improve documentation](https://github.com/influxdata/telegraf/issues?q=is%3Aopen+label%3Adocumentation) -- [Review code and feature proposals](https://github.com/influxdata/telegraf/pulls) -- Answer questions and discuss here on github and on the [Community Site](https://community.influxdata.com/) -- [Contribute plugins](CONTRIBUTING.md) -- [Contribute external plugins](docs/EXTERNAL_PLUGINS.md) - ## Minimum Requirements Telegraf shares the same [minimum requirements][] as Go: @@ -92,7 +75,6 @@ Builds for other platforms or package formats are provided by members of the Tel * Linux * [Snap](https://snapcraft.io/telegraf) by Laurent Sesquès (sajoupa) - ## How to use it: See usage with: @@ -138,330 +120,19 @@ telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb For documentation on the latest development code see the [documentation index][devel docs]. 
[release docs]: https://docs.influxdata.com/telegraf -[devel docs]: docs - -## Input Plugins - -* [activemq](./plugins/inputs/activemq) -* [aerospike](./plugins/inputs/aerospike) -* [amqp_consumer](./plugins/inputs/amqp_consumer) (rabbitmq) -* [apache](./plugins/inputs/apache) -* [apcupsd](./plugins/inputs/apcupsd) -* [aurora](./plugins/inputs/aurora) -* [aws cloudwatch](./plugins/inputs/cloudwatch) (Amazon Cloudwatch) -* [azure_storage_queue](./plugins/inputs/azure_storage_queue) -* [bcache](./plugins/inputs/bcache) -* [beanstalkd](./plugins/inputs/beanstalkd) -* [bind](./plugins/inputs/bind) -* [bond](./plugins/inputs/bond) -* [burrow](./plugins/inputs/burrow) -* [cassandra](./plugins/inputs/cassandra) (deprecated, use [jolokia2](./plugins/inputs/jolokia2)) -* [ceph](./plugins/inputs/ceph) -* [cgroup](./plugins/inputs/cgroup) -* [chrony](./plugins/inputs/chrony) -* [cisco_telemetry_gnmi](./plugins/inputs/cisco_telemetry_gnmi) (deprecated, renamed to [gnmi](/plugins/inputs/gnmi)) -* [cisco_telemetry_mdt](./plugins/inputs/cisco_telemetry_mdt) -* [clickhouse](./plugins/inputs/clickhouse) -* [cloud_pubsub](./plugins/inputs/cloud_pubsub) Google Cloud Pub/Sub -* [cloud_pubsub_push](./plugins/inputs/cloud_pubsub_push) Google Cloud Pub/Sub push endpoint -* [conntrack](./plugins/inputs/conntrack) -* [consul](./plugins/inputs/consul) -* [couchbase](./plugins/inputs/couchbase) -* [couchdb](./plugins/inputs/couchdb) -* [cpu](./plugins/inputs/cpu) -* [DC/OS](./plugins/inputs/dcos) -* [diskio](./plugins/inputs/diskio) -* [disk](./plugins/inputs/disk) -* [disque](./plugins/inputs/disque) -* [dmcache](./plugins/inputs/dmcache) -* [dns query time](./plugins/inputs/dns_query) -* [docker](./plugins/inputs/docker) -* [docker_log](./plugins/inputs/docker_log) -* [dovecot](./plugins/inputs/dovecot) -* [dpdk](./plugins/inputs/dpdk) -* [aws ecs](./plugins/inputs/ecs) (Amazon Elastic Container Service, Fargate) -* [elasticsearch](./plugins/inputs/elasticsearch) -* 
[ethtool](./plugins/inputs/ethtool) -* [eventhub_consumer](./plugins/inputs/eventhub_consumer) (Azure Event Hubs \& Azure IoT Hub) -* [exec](./plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios) -* [execd](./plugins/inputs/execd) (generic executable "daemon" processes) -* [fail2ban](./plugins/inputs/fail2ban) -* [fibaro](./plugins/inputs/fibaro) -* [file](./plugins/inputs/file) -* [filestat](./plugins/inputs/filestat) -* [filecount](./plugins/inputs/filecount) -* [fireboard](/plugins/inputs/fireboard) -* [fluentd](./plugins/inputs/fluentd) -* [github](./plugins/inputs/github) -* [gnmi](./plugins/inputs/gnmi) -* [graylog](./plugins/inputs/graylog) -* [haproxy](./plugins/inputs/haproxy) -* [hddtemp](./plugins/inputs/hddtemp) -* [httpjson](./plugins/inputs/httpjson) (generic JSON-emitting http service plugin) -* [http_listener](./plugins/inputs/influxdb_listener) (deprecated, renamed to [influxdb_listener](/plugins/inputs/influxdb_listener)) -* [http_listener_v2](./plugins/inputs/http_listener_v2) -* [http](./plugins/inputs/http) (generic HTTP plugin, supports using input data formats) -* [http_response](./plugins/inputs/http_response) -* [icinga2](./plugins/inputs/icinga2) -* [infiniband](./plugins/inputs/infiniband) -* [influxdb](./plugins/inputs/influxdb) -* [influxdb_listener](./plugins/inputs/influxdb_listener) -* [influxdb_v2_listener](./plugins/inputs/influxdb_v2_listener) -* [intel_powerstat](plugins/inputs/intel_powerstat) -* [intel_rdt](./plugins/inputs/intel_rdt) -* [internal](./plugins/inputs/internal) -* [interrupts](./plugins/inputs/interrupts) -* [ipmi_sensor](./plugins/inputs/ipmi_sensor) -* [ipset](./plugins/inputs/ipset) -* [iptables](./plugins/inputs/iptables) -* [ipvs](./plugins/inputs/ipvs) -* [jenkins](./plugins/inputs/jenkins) -* [jolokia2](./plugins/inputs/jolokia2) (java, cassandra, kafka) -* [jolokia](./plugins/inputs/jolokia) (deprecated, use [jolokia2](./plugins/inputs/jolokia2)) -* 
[jti_openconfig_telemetry](./plugins/inputs/jti_openconfig_telemetry) -* [kafka_consumer](./plugins/inputs/kafka_consumer) -* [kapacitor](./plugins/inputs/kapacitor) -* [aws kinesis](./plugins/inputs/kinesis_consumer) (Amazon Kinesis) -* [kernel](./plugins/inputs/kernel) -* [kernel_vmstat](./plugins/inputs/kernel_vmstat) -* [kibana](./plugins/inputs/kibana) -* [knx_listener](./plugins/inputs/knx_listener) -* [kubernetes](./plugins/inputs/kubernetes) -* [kube_inventory](./plugins/inputs/kube_inventory) -* [lanz](./plugins/inputs/lanz) -* [leofs](./plugins/inputs/leofs) -* [linux_sysctl_fs](./plugins/inputs/linux_sysctl_fs) -* [logparser](./plugins/inputs/logparser) (deprecated, use [tail](/plugins/inputs/tail)) -* [logstash](./plugins/inputs/logstash) -* [lustre2](./plugins/inputs/lustre2) -* [mailchimp](./plugins/inputs/mailchimp) -* [marklogic](./plugins/inputs/marklogic) -* [mcrouter](./plugins/inputs/mcrouter) -* [mdstat](./plugins/inputs/mdstat) -* [memcached](./plugins/inputs/memcached) -* [mem](./plugins/inputs/mem) -* [mesos](./plugins/inputs/mesos) -* [minecraft](./plugins/inputs/minecraft) -* [modbus](./plugins/inputs/modbus) -* [mongodb](./plugins/inputs/mongodb) -* [monit](./plugins/inputs/monit) -* [mqtt_consumer](./plugins/inputs/mqtt_consumer) -* [multifile](./plugins/inputs/multifile) -* [mysql](./plugins/inputs/mysql) -* [nats_consumer](./plugins/inputs/nats_consumer) -* [nats](./plugins/inputs/nats) -* [neptune_apex](./plugins/inputs/neptune_apex) -* [net](./plugins/inputs/net) -* [net_response](./plugins/inputs/net_response) -* [netstat](./plugins/inputs/net) -* [nfsclient](./plugins/inputs/nfsclient) -* [nginx](./plugins/inputs/nginx) -* [nginx_plus_api](./plugins/inputs/nginx_plus_api) -* [nginx_plus](./plugins/inputs/nginx_plus) -* [nginx_sts](./plugins/inputs/nginx_sts) -* [nginx_upstream_check](./plugins/inputs/nginx_upstream_check) -* [nginx_vts](./plugins/inputs/nginx_vts) -* [nsd](./plugins/inputs/nsd) -* 
[nsq_consumer](./plugins/inputs/nsq_consumer) -* [nsq](./plugins/inputs/nsq) -* [nstat](./plugins/inputs/nstat) -* [ntpq](./plugins/inputs/ntpq) -* [nvidia_smi](./plugins/inputs/nvidia_smi) -* [opcua](./plugins/inputs/opcua) -* [openldap](./plugins/inputs/openldap) -* [openntpd](./plugins/inputs/openntpd) -* [opensmtpd](./plugins/inputs/opensmtpd) -* [opentelemetry](./plugins/inputs/opentelemetry) -* [openweathermap](./plugins/inputs/openweathermap) -* [pf](./plugins/inputs/pf) -* [pgbouncer](./plugins/inputs/pgbouncer) -* [phpfpm](./plugins/inputs/phpfpm) -* [phusion passenger](./plugins/inputs/passenger) -* [ping](./plugins/inputs/ping) -* [postfix](./plugins/inputs/postfix) -* [postgresql_extensible](./plugins/inputs/postgresql_extensible) -* [postgresql](./plugins/inputs/postgresql) -* [powerdns](./plugins/inputs/powerdns) -* [powerdns_recursor](./plugins/inputs/powerdns_recursor) -* [processes](./plugins/inputs/processes) -* [procstat](./plugins/inputs/procstat) -* [prometheus](./plugins/inputs/prometheus) (can be used for [Caddy server](./plugins/inputs/prometheus/README.md#usage-for-caddy-http-server)) -* [proxmox](./plugins/inputs/proxmox) -* [puppetagent](./plugins/inputs/puppetagent) -* [rabbitmq](./plugins/inputs/rabbitmq) -* [raindrops](./plugins/inputs/raindrops) -* [ras](./plugins/inputs/ras) -* [ravendb](./plugins/inputs/ravendb) -* [redfish](./plugins/inputs/redfish) -* [redis](./plugins/inputs/redis) -* [rethinkdb](./plugins/inputs/rethinkdb) -* [riak](./plugins/inputs/riak) -* [salesforce](./plugins/inputs/salesforce) -* [sensors](./plugins/inputs/sensors) -* [sflow](./plugins/inputs/sflow) -* [smart](./plugins/inputs/smart) -* [snmp_legacy](./plugins/inputs/snmp_legacy) -* [snmp](./plugins/inputs/snmp) -* [snmp_trap](./plugins/inputs/snmp_trap) -* [socket_listener](./plugins/inputs/socket_listener) -* [solr](./plugins/inputs/solr) -* [sql](./plugins/inputs/sql) (generic SQL query plugin) -* [sql server](./plugins/inputs/sqlserver) (microsoft) -* 
[stackdriver](./plugins/inputs/stackdriver) (Google Cloud Monitoring) -* [sql](./plugins/outputs/sql) (SQL generic output) -* [statsd](./plugins/inputs/statsd) -* [suricata](./plugins/inputs/suricata) -* [swap](./plugins/inputs/swap) -* [synproxy](./plugins/inputs/synproxy) -* [syslog](./plugins/inputs/syslog) -* [sysstat](./plugins/inputs/sysstat) -* [systemd_units](./plugins/inputs/systemd_units) -* [system](./plugins/inputs/system) -* [tail](./plugins/inputs/tail) -* [temp](./plugins/inputs/temp) -* [tcp_listener](./plugins/inputs/socket_listener) -* [teamspeak](./plugins/inputs/teamspeak) -* [tengine](./plugins/inputs/tengine) -* [tomcat](./plugins/inputs/tomcat) -* [twemproxy](./plugins/inputs/twemproxy) -* [udp_listener](./plugins/inputs/socket_listener) -* [unbound](./plugins/inputs/unbound) -* [uwsgi](./plugins/inputs/uwsgi) -* [varnish](./plugins/inputs/varnish) -* [vsphere](./plugins/inputs/vsphere) VMware vSphere -* [webhooks](./plugins/inputs/webhooks) - * [filestack](./plugins/inputs/webhooks/filestack) - * [github](./plugins/inputs/webhooks/github) - * [mandrill](./plugins/inputs/webhooks/mandrill) - * [papertrail](./plugins/inputs/webhooks/papertrail) - * [particle](./plugins/inputs/webhooks/particle) - * [rollbar](./plugins/inputs/webhooks/rollbar) -* [win_eventlog](./plugins/inputs/win_eventlog) -* [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters) -* [win_services](./plugins/inputs/win_services) -* [wireguard](./plugins/inputs/wireguard) -* [wireless](./plugins/inputs/wireless) -* [x509_cert](./plugins/inputs/x509_cert) -* [zfs](./plugins/inputs/zfs) -* [zipkin](./plugins/inputs/zipkin) -* [zookeeper](./plugins/inputs/zookeeper) - -## Parsers - -- [InfluxDB Line Protocol](/plugins/parsers/influx) -- [Collectd](/plugins/parsers/collectd) -- [CSV](/plugins/parsers/csv) -- [Dropwizard](/plugins/parsers/dropwizard) -- [FormUrlencoded](/plugins/parser/form_urlencoded) -- [Graphite](/plugins/parsers/graphite) -- 
[Grok](/plugins/parsers/grok) -- [JSON](/plugins/parsers/json) -- [JSON v2](/plugins/parsers/json_v2) -- [Logfmt](/plugins/parsers/logfmt) -- [Nagios](/plugins/parsers/nagios) -- [Prometheus](/plugins/parsers/prometheus) -- [Prometheus Remote Write](/plugins/parsers/prometheusremotewrite) -- [Value](/plugins/parsers/value), ie: 45 or "booyah" -- [Wavefront](/plugins/parsers/wavefront) -- [XPath](/plugins/parsers/xpath) (supports XML, JSON, MessagePack, Protocol Buffers) - -## Serializers - -- [InfluxDB Line Protocol](/plugins/serializers/influx) -- [Carbon2](/plugins/serializers/carbon2) -- [Graphite](/plugins/serializers/graphite) -- [JSON](/plugins/serializers/json) -- [MessagePack](/plugins/serializers/msgpack) -- [Prometheus](/plugins/serializers/prometheus) -- [Prometheus Remote Write](/plugins/serializers/prometheusremotewrite) -- [ServiceNow](/plugins/serializers/nowmetric) -- [SplunkMetric](/plugins/serializers/splunkmetric) -- [Wavefront](/plugins/serializers/wavefront) - -## Processor Plugins - -* [clone](/plugins/processors/clone) -* [converter](/plugins/processors/converter) -* [date](/plugins/processors/date) -* [dedup](/plugins/processors/dedup) -* [defaults](/plugins/processors/defaults) -* [enum](/plugins/processors/enum) -* [execd](/plugins/processors/execd) -* [ifname](/plugins/processors/ifname) -* [filepath](/plugins/processors/filepath) -* [override](/plugins/processors/override) -* [parser](/plugins/processors/parser) -* [pivot](/plugins/processors/pivot) -* [port_name](/plugins/processors/port_name) -* [printer](/plugins/processors/printer) -* [regex](/plugins/processors/regex) -* [rename](/plugins/processors/rename) -* [reverse_dns](/plugins/processors/reverse_dns) -* [s2geo](/plugins/processors/s2geo) -* [starlark](/plugins/processors/starlark) -* [strings](/plugins/processors/strings) -* [tag_limit](/plugins/processors/tag_limit) -* [template](/plugins/processors/template) -* [topk](/plugins/processors/topk) -* 
[unpivot](/plugins/processors/unpivot) - -## Aggregator Plugins - -* [basicstats](./plugins/aggregators/basicstats) -* [derivative](./plugins/aggregators/derivative) -* [final](./plugins/aggregators/final) -* [histogram](./plugins/aggregators/histogram) -* [merge](./plugins/aggregators/merge) -* [minmax](./plugins/aggregators/minmax) -* [quantile](./plugins/aggregators/quantile) -* [valuecounter](./plugins/aggregators/valuecounter) - -## Output Plugins - -* [influxdb](./plugins/outputs/influxdb) (InfluxDB 1.x) -* [influxdb_v2](./plugins/outputs/influxdb_v2) ([InfluxDB 2.x](https://github.com/influxdata/influxdb)) -* [amon](./plugins/outputs/amon) -* [amqp](./plugins/outputs/amqp) (rabbitmq) -* [application_insights](./plugins/outputs/application_insights) -* [aws kinesis](./plugins/outputs/kinesis) -* [aws cloudwatch](./plugins/outputs/cloudwatch) -* [azure_monitor](./plugins/outputs/azure_monitor) -* [bigquery](./plugins/outputs/bigquery) -* [cloud_pubsub](./plugins/outputs/cloud_pubsub) Google Cloud Pub/Sub -* [cratedb](./plugins/outputs/cratedb) -* [datadog](./plugins/outputs/datadog) -* [discard](./plugins/outputs/discard) -* [dynatrace](./plugins/outputs/dynatrace) -* [elasticsearch](./plugins/outputs/elasticsearch) -* [exec](./plugins/outputs/exec) -* [execd](./plugins/outputs/execd) -* [file](./plugins/outputs/file) -* [graphite](./plugins/outputs/graphite) -* [graylog](./plugins/outputs/graylog) -* [health](./plugins/outputs/health) -* [http](./plugins/outputs/http) -* [instrumental](./plugins/outputs/instrumental) -* [kafka](./plugins/outputs/kafka) -* [librato](./plugins/outputs/librato) -* [logz.io](./plugins/outputs/logzio) -* [mqtt](./plugins/outputs/mqtt) -* [nats](./plugins/outputs/nats) -* [newrelic](./plugins/outputs/newrelic) -* [nsq](./plugins/outputs/nsq) -* [opentelemetry](./plugins/outputs/opentelemetry) -* [opentsdb](./plugins/outputs/opentsdb) -* [prometheus](./plugins/outputs/prometheus_client) -* [riemann](./plugins/outputs/riemann) -* 
[riemann_legacy](./plugins/outputs/riemann_legacy) -* [sensu](./plugins/outputs/sensu) -* [signalfx](./plugins/outputs/signalfx) -* [socket_writer](./plugins/outputs/socket_writer) -* [stackdriver](./plugins/outputs/stackdriver) (Google Cloud Monitoring) -* [syslog](./plugins/outputs/syslog) -* [tcp](./plugins/outputs/socket_writer) -* [udp](./plugins/outputs/socket_writer) -* [warp10](./plugins/outputs/warp10) -* [wavefront](./plugins/outputs/wavefront) -* [websocket](./plugins/outputs/websocket) -* [sumologic](./plugins/outputs/sumologic) -* [yandex_cloud_monitoring](./plugins/outputs/yandex_cloud_monitoring) +[developer docs]: docs +- [Input Plugins](/telegraf/docs/INPUTS.md) +- [Output Plugins](/telegraf/docs/OUTPUTS.md) +- [Processor Plugins](/telegraf/docs/PROCESSORS.md) +- [Aggregator Plugins](/telegraf/docs/AGGREGATORS.md) + + +## Contributing + +There are many ways to contribute: +- Fix and [report bugs](https://github.com/influxdata/telegraf/issues/new) +- [Improve documentation](https://github.com/influxdata/telegraf/issues?q=is%3Aopen+label%3Adocumentation) +- [Review code and feature proposals](https://github.com/influxdata/telegraf/pulls) +- Answer questions and discuss here on github and on the [Community Site](https://community.influxdata.com/) +- [Contribute plugins](CONTRIBUTING.md) +- [Contribute external plugins](docs/EXTERNAL_PLUGINS.md) \ No newline at end of file From b9aa9839fbde04fb4a728d656fae57a20281e0ba Mon Sep 17 00:00:00 2001 From: Heiko Schlittermann Date: Tue, 21 Sep 2021 17:12:44 +0200 Subject: [PATCH 073/176] feat: Add json_timestamp_layout option (#8229) --- config/config.go | 3 +- .../azure_data_explorer.go | 2 +- .../azure_data_explorer_test.go | 2 +- plugins/serializers/json/README.md | 7 +++++ plugins/serializers/json/json.go | 24 +++++++++------ plugins/serializers/json/json_test.go | 30 +++++++++++-------- plugins/serializers/registry.go | 9 ++++-- 7 files changed, 50 insertions(+), 27 deletions(-) diff --git 
a/config/config.go b/config/config.go index 56beed8ee4910..b6eed9446162f 100644 --- a/config/config.go +++ b/config/config.go @@ -1504,6 +1504,7 @@ func (c *Config) buildSerializer(tbl *ast.Table) (serializers.Serializer, error) c.getFieldString(tbl, "graphite_separator", &sc.GraphiteSeparator) c.getFieldDuration(tbl, "json_timestamp_units", &sc.TimestampUnits) + c.getFieldString(tbl, "json_timestamp_format", &sc.TimestampFormat) c.getFieldBool(tbl, "splunkmetric_hec_routing", &sc.HecRouting) c.getFieldBool(tbl, "splunkmetric_multimetric", &sc.SplunkmetricMultiMetric) @@ -1569,7 +1570,7 @@ func (c *Config) missingTomlField(_ reflect.Type, key string) error { "grok_custom_pattern_files", "grok_custom_patterns", "grok_named_patterns", "grok_patterns", "grok_timezone", "grok_unique_timestamp", "influx_max_line_bytes", "influx_sort_fields", "influx_uint_support", "interval", "json_name_key", "json_query", "json_strict", - "json_string_fields", "json_time_format", "json_time_key", "json_timestamp_units", "json_timezone", "json_v2", + "json_string_fields", "json_time_format", "json_time_key", "json_timestamp_format", "json_timestamp_units", "json_timezone", "json_v2", "metric_batch_size", "metric_buffer_limit", "name_override", "name_prefix", "name_suffix", "namedrop", "namepass", "order", "pass", "period", "precision", "prefix", "prometheus_export_timestamp", "prometheus_sort_metrics", "prometheus_string_as_label", diff --git a/plugins/outputs/azure_data_explorer/azure_data_explorer.go b/plugins/outputs/azure_data_explorer/azure_data_explorer.go index 6d411fd05c3b9..b4c2054d3c22e 100644 --- a/plugins/outputs/azure_data_explorer/azure_data_explorer.go +++ b/plugins/outputs/azure_data_explorer/azure_data_explorer.go @@ -230,7 +230,7 @@ func (adx *AzureDataExplorer) Init() error { return errors.New("Metrics grouping type is not valid") } - serializer, err := json.NewSerializer(time.Second) + serializer, err := json.NewSerializer(time.Second, "") // FIXME: get the 
json.TimestampFormat from the config file if err != nil { return err } diff --git a/plugins/outputs/azure_data_explorer/azure_data_explorer_test.go b/plugins/outputs/azure_data_explorer/azure_data_explorer_test.go index f85d074cb1f6f..b8d30d66ce28b 100644 --- a/plugins/outputs/azure_data_explorer/azure_data_explorer_test.go +++ b/plugins/outputs/azure_data_explorer/azure_data_explorer_test.go @@ -105,7 +105,7 @@ func TestWrite(t *testing.T) { for _, tC := range testCases { t.Run(tC.name, func(t *testing.T) { - serializer, err := telegrafJson.NewSerializer(time.Second) + serializer, err := telegrafJson.NewSerializer(time.Second, "") require.NoError(t, err) plugin := AzureDataExplorer{ diff --git a/plugins/serializers/json/README.md b/plugins/serializers/json/README.md index 08bb9d4f7c904..b33875578272a 100644 --- a/plugins/serializers/json/README.md +++ b/plugins/serializers/json/README.md @@ -19,6 +19,13 @@ The `json` output data format converts metrics into JSON documents. ## such as "1ns", "1us", "1ms", "10ms", "1s". Durations are truncated to ## the power of 10 less than the specified units. json_timestamp_units = "1s" + + ## The default timestamp format is Unix epoch time, subject to the + # resolution configured in json_timestamp_units. 
+ # Other timestamp layout can be configured using the Go language time + # layout specification from https://golang.org/pkg/time/#Time.Format + # e.g.: json_timestamp_format = "2006-01-02T15:04:05Z07:00" + #json_timestamp_format = "" ``` ### Examples: diff --git a/plugins/serializers/json/json.go b/plugins/serializers/json/json.go index e2d7af3305117..6db2a43ee231a 100644 --- a/plugins/serializers/json/json.go +++ b/plugins/serializers/json/json.go @@ -8,18 +8,20 @@ import ( "github.com/influxdata/telegraf" ) -type serializer struct { - TimestampUnits time.Duration +type Serializer struct { + TimestampUnits time.Duration + TimestampFormat string } -func NewSerializer(timestampUnits time.Duration) (*serializer, error) { - s := &serializer{ - TimestampUnits: truncateDuration(timestampUnits), +func NewSerializer(timestampUnits time.Duration, timestampformat string) (*Serializer, error) { + s := &Serializer{ + TimestampUnits: truncateDuration(timestampUnits), + TimestampFormat: timestampformat, } return s, nil } -func (s *serializer) Serialize(metric telegraf.Metric) ([]byte, error) { +func (s *Serializer) Serialize(metric telegraf.Metric) ([]byte, error) { m := s.createObject(metric) serialized, err := json.Marshal(m) if err != nil { @@ -30,7 +32,7 @@ func (s *serializer) Serialize(metric telegraf.Metric) ([]byte, error) { return serialized, nil } -func (s *serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { +func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { objects := make([]interface{}, 0, len(metrics)) for _, metric := range metrics { m := s.createObject(metric) @@ -48,7 +50,7 @@ func (s *serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { return serialized, nil } -func (s *serializer) createObject(metric telegraf.Metric) map[string]interface{} { +func (s *Serializer) createObject(metric telegraf.Metric) map[string]interface{} { m := make(map[string]interface{}, 4) tags := 
make(map[string]string, len(metric.TagList())) @@ -71,7 +73,11 @@ func (s *serializer) createObject(metric telegraf.Metric) map[string]interface{} m["fields"] = fields m["name"] = metric.Name() - m["timestamp"] = metric.Time().UnixNano() / int64(s.TimestampUnits) + if s.TimestampFormat == "" { + m["timestamp"] = metric.Time().UnixNano() / int64(s.TimestampUnits) + } else { + m["timestamp"] = metric.Time().UTC().Format(s.TimestampFormat) + } return m } diff --git a/plugins/serializers/json/json_test.go b/plugins/serializers/json/json_test.go index 74d7f94166621..be939243904eb 100644 --- a/plugins/serializers/json/json_test.go +++ b/plugins/serializers/json/json_test.go @@ -30,7 +30,7 @@ func TestSerializeMetricFloat(t *testing.T) { } m := metric.New("cpu", tags, fields, now) - s, _ := NewSerializer(0) + s, _ := NewSerializer(0, "") var buf []byte buf, err := s.Serialize(m) assert.NoError(t, err) @@ -40,9 +40,10 @@ func TestSerializeMetricFloat(t *testing.T) { func TestSerialize_TimestampUnits(t *testing.T) { tests := []struct { - name string - timestampUnits time.Duration - expected string + name string + timestampUnits time.Duration + timestampFormat string + expected string }{ { name: "default of 1s", @@ -74,6 +75,11 @@ func TestSerialize_TimestampUnits(t *testing.T) { timestampUnits: 65 * time.Millisecond, expected: `{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":152547879512}`, }, + { + name: "timestamp format", + timestampFormat: "2006-01-02T15:04:05Z07:00", + expected: `{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":"2018-05-05T00:06:35Z"}`, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -85,7 +91,7 @@ func TestSerialize_TimestampUnits(t *testing.T) { }, time.Unix(1525478795, 123456789), ) - s, _ := NewSerializer(tt.timestampUnits) + s, _ := NewSerializer(tt.timestampUnits, tt.timestampFormat) actual, err := s.Serialize(m) require.NoError(t, err) require.Equal(t, tt.expected+"\n", string(actual)) @@ -103,7 +109,7 
@@ func TestSerializeMetricInt(t *testing.T) { } m := metric.New("cpu", tags, fields, now) - s, _ := NewSerializer(0) + s, _ := NewSerializer(0, "") var buf []byte buf, err := s.Serialize(m) assert.NoError(t, err) @@ -122,7 +128,7 @@ func TestSerializeMetricString(t *testing.T) { } m := metric.New("cpu", tags, fields, now) - s, _ := NewSerializer(0) + s, _ := NewSerializer(0, "") var buf []byte buf, err := s.Serialize(m) assert.NoError(t, err) @@ -142,7 +148,7 @@ func TestSerializeMultiFields(t *testing.T) { } m := metric.New("cpu", tags, fields, now) - s, _ := NewSerializer(0) + s, _ := NewSerializer(0, "") var buf []byte buf, err := s.Serialize(m) assert.NoError(t, err) @@ -161,7 +167,7 @@ func TestSerializeMetricWithEscapes(t *testing.T) { } m := metric.New("My CPU", tags, fields, now) - s, _ := NewSerializer(0) + s, _ := NewSerializer(0, "") buf, err := s.Serialize(m) assert.NoError(t, err) @@ -180,7 +186,7 @@ func TestSerializeBatch(t *testing.T) { ) metrics := []telegraf.Metric{m, m} - s, _ := NewSerializer(0) + s, _ := NewSerializer(0, "") buf, err := s.SerializeBatch(metrics) require.NoError(t, err) require.Equal(t, []byte(`{"metrics":[{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":0},{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":0}]}`), buf) @@ -199,7 +205,7 @@ func TestSerializeBatchSkipInf(t *testing.T) { ), } - s, err := NewSerializer(0) + s, err := NewSerializer(0, "") require.NoError(t, err) buf, err := s.SerializeBatch(metrics) require.NoError(t, err) @@ -218,7 +224,7 @@ func TestSerializeBatchSkipInfAllFields(t *testing.T) { ), } - s, err := NewSerializer(0) + s, err := NewSerializer(0, "") require.NoError(t, err) buf, err := s.SerializeBatch(metrics) require.NoError(t, err) diff --git a/plugins/serializers/registry.go b/plugins/serializers/registry.go index e67a9594dda73..b17364e66f0a6 100644 --- a/plugins/serializers/registry.go +++ b/plugins/serializers/registry.go @@ -88,6 +88,9 @@ type Config struct { // Timestamp units 
to use for JSON formatted output TimestampUnits time.Duration `toml:"timestamp_units"` + // Timestamp format to use for JSON formatted output + TimestampFormat string `toml:"timestamp_format"` + // Include HEC routing fields for splunkmetric output HecRouting bool `toml:"hec_routing"` @@ -123,7 +126,7 @@ func NewSerializer(config *Config) (Serializer, error) { case "graphite": serializer, err = NewGraphiteSerializer(config.Prefix, config.Template, config.GraphiteTagSupport, config.GraphiteTagSanitizeMode, config.GraphiteSeparator, config.Templates) case "json": - serializer, err = NewJSONSerializer(config.TimestampUnits) + serializer, err = NewJSONSerializer(config.TimestampUnits, config.TimestampFormat) case "splunkmetric": serializer, err = NewSplunkmetricSerializer(config.HecRouting, config.SplunkmetricMultiMetric) case "nowmetric": @@ -188,8 +191,8 @@ func NewWavefrontSerializer(prefix string, useStrict bool, sourceOverride []stri return wavefront.NewSerializer(prefix, useStrict, sourceOverride) } -func NewJSONSerializer(timestampUnits time.Duration) (Serializer, error) { - return json.NewSerializer(timestampUnits) +func NewJSONSerializer(timestampUnits time.Duration, timestampFormat string) (Serializer, error) { + return json.NewSerializer(timestampUnits, timestampFormat) } func NewCarbon2Serializer(carbon2format string, carbon2SanitizeReplaceChar string) (Serializer, error) { From 90d08787f5961cd465c0f6b04194cdb475cef35c Mon Sep 17 00:00:00 2001 From: John Seekins Date: Tue, 21 Sep 2021 10:07:58 -0600 Subject: [PATCH 074/176] feat: add measurements from puppet 5 (#9706) --- plugins/inputs/puppetagent/README.md | 37 +++++++---- .../inputs/puppetagent/last_run_summary.yaml | 39 ++++++----- plugins/inputs/puppetagent/puppetagent.go | 52 +++++++++------ .../inputs/puppetagent/puppetagent_test.go | 64 +++++++++++-------- 4 files changed, 115 insertions(+), 77 deletions(-) diff --git a/plugins/inputs/puppetagent/README.md b/plugins/inputs/puppetagent/README.md 
index 687005b98cc11..1406064d5c617 100644 --- a/plugins/inputs/puppetagent/README.md +++ b/plugins/inputs/puppetagent/README.md @@ -85,18 +85,19 @@ Meta: - tags: `` Measurement names: + - puppetagent_changes_total - puppetagent_events_failure - puppetagent_events_total - puppetagent_events_success + - puppetagent_resources_changed + - puppetagent_resources_corrective_change - puppetagent_resources_failed + - puppetagent_resources_failedtorestart + - puppetagent_resources_outofsync + - puppetagent_resources_restarted - puppetagent_resources_scheduled - - puppetagent_resources_changed - puppetagent_resources_skipped - puppetagent_resources_total - - puppetagent_resources_failedtorestart - - puppetagent_resources_restarted - - puppetagent_resources_outofsync - - puppetagent_changes_total - puppetagent_time_service - puppetagent_time_lastrun - puppetagent_version_config @@ -108,18 +109,26 @@ Meta: - tags: `` Measurement names: - - puppetagent_time_user - - puppetagent_time_schedule - - puppetagent_time_filebucket - - puppetagent_time_file - - puppetagent_time_exec - puppetagent_time_anchor - - puppetagent_time_sshauthorizedkey - - puppetagent_time_package - - puppetagent_time_total + - puppetagent_time_catalogapplication - puppetagent_time_configretrieval - - puppetagent_time_lastrun + - puppetagent_time_convertcatalog - puppetagent_time_cron + - puppetagent_time_exec + - puppetagent_time_factgeneration + - puppetagent_time_file + - puppetagent_time_filebucket + - puppetagent_time_group + - puppetagent_time_lastrun + - puppetagent_time_noderetrieval + - puppetagent_time_notify + - puppetagent_time_package + - puppetagent_time_pluginsync + - puppetagent_time_schedule + - puppetagent_time_sshauthorizedkey + - puppetagent_time_total + - puppetagent_time_transactionevaluation + - puppetagent_time_user - puppetagent_version_config #### PuppetAgent string measurements: diff --git a/plugins/inputs/puppetagent/last_run_summary.yaml 
b/plugins/inputs/puppetagent/last_run_summary.yaml index be2f017465fad..c1aa1ce276216 100644 --- a/plugins/inputs/puppetagent/last_run_summary.yaml +++ b/plugins/inputs/puppetagent/last_run_summary.yaml @@ -1,34 +1,43 @@ --- events: failure: 0 + noop: 0 total: 0 success: 0 resources: + changed: 0 + corrective_change: 0 failed: 0 + failed_to_restart: 0 + out_of_sync: 0 + restarted: 0 scheduled: 0 - changed: 0 skipped: 0 total: 109 - failed_to_restart: 0 - restarted: 0 - out_of_sync: 0 changes: total: 0 time: - user: 0.004331 - schedule: 0.001123 - filebucket: 0.000353 - file: 0.441472 - exec: 0.508123 anchor: 0.000555 - yumrepo: 0.006989 - ssh_authorized_key: 0.000764 - service: 1.807795 - package: 1.325788 - total: 8.85354707064819 + catalog_application: 0.010555 config_retrieval: 4.75567007064819 - last_run: 1444936531 + convert_catalog: 1.3 cron: 0.000584 + exec: 0.508123 + fact_generation: 0.34 + file: 0.441472 + filebucket: 0.000353 + last_run: 1444936531 + node_retrieval: 1.235 + notify: 0.00035 + package: 1.325788 + plugin_sync: 0.325788 + schedule: 0.001123 + service: 1.807795 + ssh_authorized_key: 0.000764 + total: 8.85354707064819 + transaction_evaluation: 4.69765 + user: 0.004331 + yumrepo: 0.006989 version: config: "environment:d6018ce" puppet: "3.7.5" diff --git a/plugins/inputs/puppetagent/puppetagent.go b/plugins/inputs/puppetagent/puppetagent.go index 741de4a0dc013..36c284ff57cb6 100644 --- a/plugins/inputs/puppetagent/puppetagent.go +++ b/plugins/inputs/puppetagent/puppetagent.go @@ -32,19 +32,21 @@ type State struct { type event struct { Failure int64 `yaml:"failure"` + Noop int64 `yaml:"noop"` Total int64 `yaml:"total"` Success int64 `yaml:"success"` } type resource struct { - Failed int64 `yaml:"failed"` - Scheduled int64 `yaml:"scheduled"` - Changed int64 `yaml:"changed"` - Skipped int64 `yaml:"skipped"` - Total int64 `yaml:"total"` - FailedToRestart int64 `yaml:"failed_to_restart"` - Restarted int64 `yaml:"restarted"` - OutOfSync int64 
`yaml:"out_of_sync"` + Changed int64 `yaml:"changed"` + CorrectiveChange int64 `yaml:"corrective_change"` + Failed int64 `yaml:"failed"` + FailedToRestart int64 `yaml:"failed_to_restart"` + OutOfSync int64 `yaml:"out_of_sync"` + Restarted int64 `yaml:"restarted"` + Scheduled int64 `yaml:"scheduled"` + Skipped int64 `yaml:"skipped"` + Total int64 `yaml:"total"` } type change struct { @@ -52,19 +54,27 @@ type change struct { } type time struct { - User float64 `yaml:"user"` - Schedule float64 `yaml:"schedule"` - FileBucket float64 `yaml:"filebucket"` - File float64 `yaml:"file"` - Exec float64 `yaml:"exec"` - Anchor float64 `yaml:"anchor"` - SSHAuthorizedKey float64 `yaml:"ssh_authorized_key"` - Service float64 `yaml:"service"` - Package float64 `yaml:"package"` - Total float64 `yaml:"total"` - ConfigRetrieval float64 `yaml:"config_retrieval"` - LastRun int64 `yaml:"last_run"` - Cron float64 `yaml:"cron"` + Anchor float64 `yaml:"anchor"` + CataLogApplication float64 `yaml:"catalog_application"` + ConfigRetrieval float64 `yaml:"config_retrieval"` + ConvertCatalog float64 `yaml:"convert_catalog"` + Cron float64 `yaml:"cron"` + Exec float64 `yaml:"exec"` + FactGeneration float64 `yaml:"fact_generation"` + File float64 `yaml:"file"` + FileBucket float64 `yaml:"filebucket"` + Group float64 `yaml:"group"` + LastRun int64 `yaml:"last_run"` + NodeRetrieval float64 `yaml:"node_retrieval"` + Notify float64 `yaml:"notify"` + Package float64 `yaml:"package"` + PluginSync float64 `yaml:"plugin_sync"` + Schedule float64 `yaml:"schedule"` + Service float64 `yaml:"service"` + SSHAuthorizedKey float64 `yaml:"ssh_authorized_key"` + Total float64 `yaml:"total"` + TransactionEvaluation float64 `yaml:"transaction_evaluation"` + User float64 `yaml:"user"` } type version struct { diff --git a/plugins/inputs/puppetagent/puppetagent_test.go b/plugins/inputs/puppetagent/puppetagent_test.go index 6ba769ac5dd37..754fb39783a2a 100644 --- a/plugins/inputs/puppetagent/puppetagent_test.go +++ 
b/plugins/inputs/puppetagent/puppetagent_test.go @@ -17,33 +17,43 @@ func TestGather(t *testing.T) { tags := map[string]string{"location": "last_run_summary.yaml"} fields := map[string]interface{}{ - "events_failure": int64(0), - "events_total": int64(0), - "events_success": int64(0), - "resources_failed": int64(0), - "resources_scheduled": int64(0), - "resources_changed": int64(0), - "resources_skipped": int64(0), - "resources_total": int64(109), - "resources_failedtorestart": int64(0), - "resources_restarted": int64(0), - "resources_outofsync": int64(0), - "changes_total": int64(0), - "time_lastrun": int64(1444936531), - "version_configstring": "environment:d6018ce", - "time_user": float64(0.004331), - "time_schedule": float64(0.001123), - "time_filebucket": float64(0.000353), - "time_file": float64(0.441472), - "time_exec": float64(0.508123), - "time_anchor": float64(0.000555), - "time_sshauthorizedkey": float64(0.000764), - "time_service": float64(1.807795), - "time_package": float64(1.325788), - "time_total": float64(8.85354707064819), - "time_configretrieval": float64(4.75567007064819), - "time_cron": float64(0.000584), - "version_puppet": "3.7.5", + "events_failure": int64(0), + "events_noop": int64(0), + "events_success": int64(0), + "events_total": int64(0), + "resources_changed": int64(0), + "resources_correctivechange": int64(0), + "resources_failed": int64(0), + "resources_failedtorestart": int64(0), + "resources_outofsync": int64(0), + "resources_restarted": int64(0), + "resources_scheduled": int64(0), + "resources_skipped": int64(0), + "resources_total": int64(109), + "changes_total": int64(0), + "time_anchor": float64(0.000555), + "time_catalogapplication": float64(0.010555), + "time_configretrieval": float64(4.75567007064819), + "time_convertcatalog": float64(1.3), + "time_cron": float64(0.000584), + "time_exec": float64(0.508123), + "time_factgeneration": float64(0.34), + "time_file": float64(0.441472), + "time_filebucket": float64(0.000353), + 
"time_group": float64(0), + "time_lastrun": int64(1444936531), + "time_noderetrieval": float64(1.235), + "time_notify": float64(0.00035), + "time_package": float64(1.325788), + "time_pluginsync": float64(0.325788), + "time_schedule": float64(0.001123), + "time_service": float64(1.807795), + "time_sshauthorizedkey": float64(0.000764), + "time_total": float64(8.85354707064819), + "time_transactionevaluation": float64(4.69765), + "time_user": float64(0.004331), + "version_configstring": "environment:d6018ce", + "version_puppet": "3.7.5", } acc.AssertContainsTaggedFields(t, "puppetagent", fields, tags) From 9e004623e03ba9f7003a36f0b5f9ffd259e7fafc Mon Sep 17 00:00:00 2001 From: Alan Pope Date: Tue, 21 Sep 2021 18:39:48 +0100 Subject: [PATCH 075/176] Update README.md Sorry, unbreaking the broken commit to master I did earlier. Won't do it again, promise! --- README.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index 5f0861f4fa3cb..9c75311e4e2cb 100644 --- a/README.md +++ b/README.md @@ -13,10 +13,10 @@ Design goal: Telegraf is plugin-driven and has the concept of 4 distinct plugin types: -1. [Input Plugins](#input-plugins) collect metrics from the system, services, or 3rd party APIs -2. [Processor Plugins](#processor-plugins) transform, decorate, and/or filter metrics -3. [Aggregator Plugins](#aggregator-plugins) create aggregate metrics (e.g. mean, min, max, quantiles, etc.) -4. [Output Plugins](#output-plugins) write metrics to various destinations +1. [Input Plugins](/docs/INPUTS.md) collect metrics from the system, services, or 3rd party APIs +2. [Processor Plugins](/docs/PROCESSORS.md) transform, decorate, and/or filter metrics +3. [Aggregator Plugins](/docs/AGGREGATORS.md) create aggregate metrics (e.g. mean, min, max, quantiles, etc.) +4. 
[Output Plugins](/docs/OUTPUTS.md) write metrics to various destinations New plugins are designed to be easy to contribute, pull requests are welcomed and we work to incorporate as many pull requests as possible. If none of the internal plugins fit your needs, you could have a look at the [list of external plugins](EXTERNAL_PLUGINS.md). @@ -117,14 +117,14 @@ telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb [Latest Release Documentation][release docs]. -For documentation on the latest development code see the [documentation index][devel docs]. +For documentation on the latest development code see the [documentation index](/docs). [release docs]: https://docs.influxdata.com/telegraf [developer docs]: docs -- [Input Plugins](/telegraf/docs/INPUTS.md) -- [Output Plugins](/telegraf/docs/OUTPUTS.md) -- [Processor Plugins](/telegraf/docs/PROCESSORS.md) -- [Aggregator Plugins](/telegraf/docs/AGGREGATORS.md) +- [Input Plugins](/docs/INPUTS.md) +- [Output Plugins](/docs/OUTPUTS.md) +- [Processor Plugins](/docs/PROCESSORS.md) +- [Aggregator Plugins](/docs/AGGREGATORS.md) ## Contributing @@ -135,4 +135,4 @@ There are many ways to contribute: - [Review code and feature proposals](https://github.com/influxdata/telegraf/pulls) - Answer questions and discuss here on github and on the [Community Site](https://community.influxdata.com/) - [Contribute plugins](CONTRIBUTING.md) -- [Contribute external plugins](docs/EXTERNAL_PLUGINS.md) \ No newline at end of file +- [Contribute external plugins](docs/EXTERNAL_PLUGINS.md) From c4c3c8ade982c7935f013bc93107dd9c702541f5 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Tue, 21 Sep 2021 12:03:41 -0700 Subject: [PATCH 076/176] docs: update caddy server instructions (#9698) --- plugins/inputs/prometheus/README.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/prometheus/README.md b/plugins/inputs/prometheus/README.md 
index c826fd0e015ab..955c6ab7d978b 100644 --- a/plugins/inputs/prometheus/README.md +++ b/plugins/inputs/prometheus/README.md @@ -158,20 +158,20 @@ Authorization header. ### Usage for Caddy HTTP server -If you want to monitor Caddy, you need to use Caddy with its Prometheus plugin: +Steps to monitor Caddy with Telegraf's Prometheus input plugin: -* Download Caddy+Prometheus plugin [here](https://caddyserver.com/download/linux/amd64?plugins=http.prometheus) -* Add the `prometheus` directive in your `CaddyFile` +* Download [Caddy](https://caddyserver.com/download) +* Download Prometheus and set up [monitoring Caddy with Prometheus metrics](https://caddyserver.com/docs/metrics#monitoring-caddy-with-prometheus-metrics) * Restart Caddy * Configure Telegraf to fetch metrics on it: ```toml [[inputs.prometheus]] # ## An array of urls to scrape metrics from. - urls = ["http://localhost:9180/metrics"] + urls = ["http://localhost:2019/metrics"] ``` -> This is the default URL where Caddy Prometheus plugin will send data. +> This is the default URL where Caddy will send data. > For more details, please read the [Caddy Prometheus documentation](https://github.com/miekg/caddy-prometheus/blob/master/README.md). 
### Metrics: From 1c0b74eacded31b103a3467535166c39f6dffc7b Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 21 Sep 2021 12:19:45 -0700 Subject: [PATCH 077/176] chore: update nightly to package by arch (#9781) --- .circleci/config.yml | 156 ++++++++++++++++++++++++++----------------- Makefile | 12 +--- 2 files changed, 96 insertions(+), 72 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 1f644a7b9d20b..01a4bce06952e 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -109,9 +109,6 @@ commands: release: type: boolean default: false - nightly: - type: boolean - default: false type: type: string default: "" @@ -124,15 +121,9 @@ commands: condition: << parameters.release >> steps: - run: 'make package' - - when: - condition: << parameters.nightly >> - steps: - - run: 'make package' - - run: 'make upload-nightly' - unless: condition: or: - - << parameters.nightly >> - << parameters.release >> steps: - run: 'make package include_packages="$(make << parameters.type >>)"' @@ -269,8 +260,17 @@ jobs: nightly: executor: go-1_17 steps: - - package-build: - nightly: true + - attach_workspace: + at: '/build' + - run: + command: | + aws s3 sync /build/dist s3://dl.influxdata.com/telegraf/nightlies/ \ + --exclude "*" \ + --include "*.tar.gz" \ + --include "*.deb" \ + --include "*.rpm" \ + --include "*.zip" \ + --acl public-read package-consolidate: executor: name: win/default @@ -336,6 +336,63 @@ jobs: printf -v payload '{ "pullRequestNumber": "%s" }' "$PR" curl -X POST "https://182c7jdgog.execute-api.us-east-1.amazonaws.com/prod/shareArtifacts" --data "$payload" +commonjobs: + - &test-awaiter + 'test-awaiter': + requires: + - 'test-go-1_16' + - 'test-go-1_16-386' + - 'test-go-1_17' + - 'test-go-1_17-386' + - &windows-package + 'windows-package': + requires: + - 'test-go-windows' + - &darwin-package + 'darwin-package': + requires: + - 'test-go-mac' + - &i386-package + 'i386-package': + 
requires: + - 'test-awaiter' + - &ppc64le-package + 'ppc64le-package': + requires: + - 'test-awaiter' + - &s390x-package + 's390x-package': + requires: + - 'test-awaiter' + - &armel-package + 'armel-package': + requires: + - 'test-awaiter' + - &amd64-package + 'amd64-package': + requires: + - 'test-awaiter' + - &arm64-package + 'arm64-package': + requires: + - 'test-awaiter' + - &armhf-package + 'armhf-package': + requires: + - 'test-awaiter' + - &static-package + 'static-package': + requires: + - 'test-awaiter' + - &mipsel-package + 'mipsel-package': + requires: + - 'test-awaiter' + - &mips-package + 'mips-package': + requires: + - 'test-awaiter' + workflows: version: 2 check: @@ -376,48 +433,19 @@ workflows: filters: tags: only: /.*/ - - 'test-awaiter': - requires: - - 'test-go-1_16' - - 'test-go-1_16-386' - - 'test-go-1_17' - - 'test-go-1_17-386' - - 'windows-package': - requires: - - 'test-go-windows' - - 'darwin-package': - requires: - - 'test-go-mac' - - 'i386-package': - requires: - - 'test-awaiter' - - 'ppc64le-package': - requires: - - 'test-awaiter' - - 's390x-package': - requires: - - 'test-awaiter' - - 'armel-package': - requires: - - 'test-awaiter' - - 'amd64-package': - requires: - - 'test-awaiter' - - 'arm64-package': - requires: - - 'test-awaiter' - - 'armhf-package': - requires: - - 'test-awaiter' - - 'static-package': - requires: - - 'test-awaiter' - - 'mipsel-package': - requires: - - 'test-awaiter' - - 'mips-package': - requires: - - 'test-awaiter' + - *test-awaiter + - *windows-package + - *darwin-package + - *i386-package + - *ppc64le-package + - *s390x-package + - *armel-package + - *amd64-package + - *arm64-package + - *armhf-package + - *static-package + - *mipsel-package + - *mips-package - 'share-artifacts': requires: - 'i386-package' @@ -479,14 +507,20 @@ workflows: - 'deps' - 'test-go-mac' - 'test-go-windows' - - 'nightly': - requires: - - 'test-go-windows' - - 'test-go-mac' - - 'test-go-1_16' - - 'test-go-1_16-386' - - 'test-go-1_17' - 
- 'test-go-1_17-386' + - *test-awaiter + - *windows-package + - *darwin-package + - *i386-package + - *ppc64le-package + - *s390x-package + - *armel-package + - *amd64-package + - *arm64-package + - *armhf-package + - *static-package + - *mipsel-package + - *mips-package + - nightly triggers: - schedule: cron: "0 7 * * *" diff --git a/Makefile b/Makefile index a7797a0e8ce5f..cbe0e2a2e5dbb 100644 --- a/Makefile +++ b/Makefile @@ -203,7 +203,7 @@ plugin-%: ci-1.16: docker build -t quay.io/influxdb/telegraf-ci:1.16.7 - < scripts/ci-1.16.docker docker push quay.io/influxdb/telegraf-ci:1.16.7 - + .PHONY: ci-1.17 ci-1.17: docker build -t quay.io/influxdb/telegraf-ci:1.17.0 - < scripts/ci-1.17.docker @@ -346,16 +346,6 @@ $(include_packages): tar --owner 0 --group 0 -czvf $(pkgdir)/telegraf-$(tar_version)_$@ -C $(dir $(DESTDIR)) . ;\ fi -.PHONY: upload-nightly -upload-nightly: - aws s3 sync $(pkgdir) s3://dl.influxdata.com/telegraf/nightlies/ \ - --exclude "*" \ - --include "*.tar.gz" \ - --include "*.deb" \ - --include "*.rpm" \ - --include "*.zip" \ - --acl public-read - amd64.deb x86_64.rpm linux_amd64.tar.gz: export GOOS := linux amd64.deb x86_64.rpm linux_amd64.tar.gz: export GOARCH := amd64 From 027647e3edde77ce6a6c70956ea3a5bd20abf2d2 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 21 Sep 2021 12:53:18 -0700 Subject: [PATCH 078/176] chore: automate updating etc/telegraf.conf and etc/telegraf_windows.conf (#9684) --- .circleci/config.yml | 57 ++++++++++++++++++++++++++++++++++++++ scripts/generate_config.sh | 27 ++++++++++++++++++ scripts/update_config.sh | 22 +++++++++++++++ 3 files changed, 106 insertions(+) create mode 100755 scripts/generate_config.sh create mode 100755 scripts/update_config.sh diff --git a/.circleci/config.yml b/.circleci/config.yml index 01a4bce06952e..dc59d4aa13cdb 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -25,6 +25,23 @@ executors: GOFLAGS: -p=8 commands: + 
generate-config: + parameters: + os: + type: string + default: "linux" + steps: + - checkout + - attach_workspace: + at: '/build' + - run: ./scripts/generate_config.sh << parameters.os >> + - store_artifacts: + path: './new-config' + destination: 'new-config' + - persist_to_workspace: + root: './new-config' + paths: + - '*' check-changed-files-or-halt: steps: - run: ./scripts/check-file-changes.sh @@ -335,6 +352,24 @@ jobs: PR=${CIRCLE_PULL_REQUEST##*/} printf -v payload '{ "pullRequestNumber": "%s" }' "$PR" curl -X POST "https://182c7jdgog.execute-api.us-east-1.amazonaws.com/prod/shareArtifacts" --data "$payload" + generate-config: + executor: go-1_17 + steps: + - generate-config + generate-config-win: + executor: + name: win/default + shell: bash.exe + steps: + - generate-config: + os: windows + update-config: + executor: go-1_17 + steps: + - checkout + - attach_workspace: + at: '/new-config' + - run: ./scripts/update_config.sh ${UPDATE_CONFIG_TOKEN} commonjobs: - &test-awaiter @@ -446,6 +481,28 @@ workflows: - *static-package - *mipsel-package - *mips-package + - 'generate-config': + requires: + - 'amd64-package' + filters: + branches: + only: + - master + - 'generate-config-win': + requires: + - 'windows-package' + filters: + branches: + only: + - master + - 'update-config': + requires: + - 'generate-config-win' + - 'generate-config' + filters: + branches: + only: + - master - 'share-artifacts': requires: - 'i386-package' diff --git a/scripts/generate_config.sh b/scripts/generate_config.sh new file mode 100755 index 0000000000000..c85dd05172631 --- /dev/null +++ b/scripts/generate_config.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# This script is responsible for generating the Telegraf config found under the `etc` directory. +# This script is meant to be only ran in within the Circle CI pipeline so that the Tiger Bot can update them automatically. +# It supports Windows and Linux because the configs are different depending on the OS. 
+

+os=$1 # windows or linux
+exe_path="/build/extracted" # Path will contain telegraf binary
+config_name="telegraf.conf"
+
+if [ "$os" = "windows" ]; then
+    zip=$(/bin/find ./build/dist -maxdepth 1 -name "*windows_amd64.zip" -print)
+    exe_path="$PWD/build/extracted"
+    unzip "$zip" -d "$exe_path"
+    config_name="telegraf_windows.conf"
+    exe_path=$(/bin/find "$exe_path" -name telegraf.exe -type f -print)
+else
+    tar_path=$(find /build/dist -maxdepth 1 -name "*linux_amd64.tar.gz" -print | grep -v ".*static.*")
+    mkdir "$exe_path"
+    tar --extract --file="$tar_path" --directory "$exe_path"
+    exe_path=$(find "$exe_path" -name telegraf -type f -print | grep ".*usr/bin/.*")
+fi
+
+$exe_path config > $config_name
+
+mkdir ./new-config
+mv $config_name ./new-config
diff --git a/scripts/update_config.sh b/scripts/update_config.sh
new file mode 100755
index 0000000000000..87cfe2620ab61
--- /dev/null
+++ b/scripts/update_config.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+# This script is responsible for triggering the Tiger Bot endpoint that will create the pull request with the newly generated configs.
+# This script is meant to only be run within the Circle CI pipeline.
+
+token=$1
+
+config_path="/new-config"
+
+if [ ! -f "$config_path/telegraf.conf" ]; then
+    echo "$config_path/telegraf.conf does not exist"
+    exit
+fi
+if [ !
-f "$config_path/telegraf_windows.conf" ]; then + echo "$config_path/telegraf_windows.conf does not exist" + exit +fi + +if cmp -s "$config_path/telegraf.conf" "etc/telegraf.conf" && cmp -s "$config_path/telegraf_windows.conf" "etc/telegraf_windows.conf"; then + echo "Both telegraf.conf and telegraf_windows.conf haven't changed" +fi + +curl -H "Authorization: Bearer $token" -X POST "https://182c7jdgog.execute-api.us-east-1.amazonaws.com/prod/updateConfig" From a9898f179bbcbbfbc126b5f604c3357b990c80c3 Mon Sep 17 00:00:00 2001 From: alespour <42931850+alespour@users.noreply.github.com> Date: Tue, 21 Sep 2021 23:02:36 +0200 Subject: [PATCH 079/176] feat: add graylog plugin TCP support (#9644) --- etc/telegraf.conf | 4 +- plugins/outputs/graylog/README.md | 10 +- plugins/outputs/graylog/graylog.go | 181 +++++++++++++++++++----- plugins/outputs/graylog/graylog_test.go | 156 ++++++++++++++++++-- 4 files changed, 295 insertions(+), 56 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index beb22821464d9..0ed5ba8ebffba 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -762,8 +762,8 @@ # # Send telegraf metrics to graylog # [[outputs.graylog]] -# ## UDP endpoint for your graylog instance. -# servers = ["127.0.0.1:12201"] +# ## Endpoints for your graylog instances. +# servers = ["udp://127.0.0.1:12201"] # # ## The field to use as the GELF short_message, if unset the static string # ## "telegraf" will be used. diff --git a/plugins/outputs/graylog/README.md b/plugins/outputs/graylog/README.md index 4945ce46f84f0..6003122894f6d 100644 --- a/plugins/outputs/graylog/README.md +++ b/plugins/outputs/graylog/README.md @@ -8,11 +8,17 @@ This plugin writes to a Graylog instance using the "[GELF][]" format. ```toml [[outputs.graylog]] - ## UDP endpoint for your graylog instances. - servers = ["127.0.0.1:12201"] + ## Endpoints for your graylog instances. + servers = ["udp://127.0.0.1:12201"] + + ## Connection timeout. 
+ # timeout = "5s" ## The field to use as the GELF short_message, if unset the static string ## "telegraf" will be used. ## example: short_message_field = "message" # short_message_field = "" ``` + +Server endpoint may be specified without UDP or TCP scheme (eg. "127.0.0.1:12201"). +In such case, UDP protocol is assumed. diff --git a/plugins/outputs/graylog/graylog.go b/plugins/outputs/graylog/graylog.go index cf5dc6dc5ac3b..951273e2e7703 100644 --- a/plugins/outputs/graylog/graylog.go +++ b/plugins/outputs/graylog/graylog.go @@ -11,8 +11,11 @@ import ( "math" "net" "os" + "strings" + "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/outputs" ) @@ -21,45 +24,78 @@ const ( defaultConnection = "wan" defaultMaxChunkSizeWan = 1420 defaultMaxChunkSizeLan = 8154 + defaultScheme = "udp" + defaultTimeout = 5 * time.Second ) -type GelfConfig struct { +type gelfConfig struct { GraylogEndpoint string Connection string MaxChunkSizeWan int MaxChunkSizeLan int } -type Gelf struct { - GelfConfig +type gelf interface { + io.WriteCloser } -func NewGelfWriter(config GelfConfig) *Gelf { - if config.GraylogEndpoint == "" { - config.GraylogEndpoint = defaultGraylogEndpoint +type gelfCommon struct { + gelfConfig + dialer *net.Dialer + conn net.Conn +} + +type gelfUDP struct { + gelfCommon +} + +type gelfTCP struct { + gelfCommon +} + +func newGelfWriter(cfg gelfConfig, dialer *net.Dialer) gelf { + if cfg.GraylogEndpoint == "" { + cfg.GraylogEndpoint = defaultGraylogEndpoint } - if config.Connection == "" { - config.Connection = defaultConnection + if cfg.Connection == "" { + cfg.Connection = defaultConnection } - if config.MaxChunkSizeWan == 0 { - config.MaxChunkSizeWan = defaultMaxChunkSizeWan + if cfg.MaxChunkSizeWan == 0 { + cfg.MaxChunkSizeWan = defaultMaxChunkSizeWan } - if config.MaxChunkSizeLan == 0 { - config.MaxChunkSizeLan = defaultMaxChunkSizeLan + if cfg.MaxChunkSizeLan == 0 { + cfg.MaxChunkSizeLan = 
defaultMaxChunkSizeLan } - g := &Gelf{GelfConfig: config} + scheme := defaultScheme + parts := strings.SplitN(cfg.GraylogEndpoint, "://", 2) + if len(parts) == 2 { + scheme = strings.ToLower(parts[0]) + cfg.GraylogEndpoint = parts[1] + } + common := gelfCommon{ + gelfConfig: cfg, + dialer: dialer, + } + + var g gelf + switch scheme { + case "tcp": + g = &gelfTCP{gelfCommon: common} + default: + g = &gelfUDP{gelfCommon: common} + } return g } -func (g *Gelf) Write(message []byte) (n int, err error) { +func (g *gelfUDP) Write(message []byte) (n int, err error) { compressed := g.compress(message) - chunksize := g.GelfConfig.MaxChunkSizeWan + chunksize := g.gelfConfig.MaxChunkSizeWan length := compressed.Len() if length > chunksize { @@ -84,10 +120,19 @@ func (g *Gelf) Write(message []byte) (n int, err error) { n = len(message) - return + return n, nil +} + +func (g *gelfUDP) Close() (err error) { + if g.conn != nil { + err = g.conn.Close() + g.conn = nil + } + + return err } -func (g *Gelf) createChunkedMessage(index int, chunkCountInt int, id []byte, compressed *bytes.Buffer) bytes.Buffer { +func (g *gelfUDP) createChunkedMessage(index int, chunkCountInt int, id []byte, compressed *bytes.Buffer) bytes.Buffer { var packet bytes.Buffer chunksize := g.getChunksize() @@ -104,26 +149,26 @@ func (g *Gelf) createChunkedMessage(index int, chunkCountInt int, id []byte, com return packet } -func (g *Gelf) getChunksize() int { - if g.GelfConfig.Connection == "wan" { - return g.GelfConfig.MaxChunkSizeWan +func (g *gelfUDP) getChunksize() int { + if g.gelfConfig.Connection == "wan" { + return g.gelfConfig.MaxChunkSizeWan } - if g.GelfConfig.Connection == "lan" { - return g.GelfConfig.MaxChunkSizeLan + if g.gelfConfig.Connection == "lan" { + return g.gelfConfig.MaxChunkSizeLan } - return g.GelfConfig.MaxChunkSizeWan + return g.gelfConfig.MaxChunkSizeWan } -func (g *Gelf) intToBytes(i int) []byte { +func (g *gelfUDP) intToBytes(i int) []byte { buf := new(bytes.Buffer) 
binary.Write(buf, binary.LittleEndian, int8(i)) return buf.Bytes() } -func (g *Gelf) compress(b []byte) bytes.Buffer { +func (g *gelfUDP) compress(b []byte) bytes.Buffer { var buf bytes.Buffer comp := zlib.NewWriter(&buf) @@ -133,30 +178,83 @@ func (g *Gelf) compress(b []byte) bytes.Buffer { return buf } -func (g *Gelf) send(b []byte) error { - udpAddr, err := net.ResolveUDPAddr("udp", g.GelfConfig.GraylogEndpoint) +func (g *gelfUDP) send(b []byte) error { + if g.conn == nil { + conn, err := g.dialer.Dial("udp", g.gelfConfig.GraylogEndpoint) + if err != nil { + return err + } + g.conn = conn + } + + _, err := g.conn.Write(b) if err != nil { - return err + _ = g.conn.Close() + g.conn = nil } - conn, err := net.DialUDP("udp", nil, udpAddr) + return err +} + +func (g *gelfTCP) Write(message []byte) (n int, err error) { + err = g.send(message) if err != nil { - return err + return 0, err + } + + n = len(message) + + return n, nil +} + +func (g *gelfTCP) Close() (err error) { + if g.conn != nil { + err = g.conn.Close() + g.conn = nil + } + + return err +} + +func (g *gelfTCP) send(b []byte) error { + if g.conn == nil { + conn, err := g.dialer.Dial("tcp", g.gelfConfig.GraylogEndpoint) + if err != nil { + return err + } + g.conn = conn + } + + _, err := g.conn.Write(b) + if err != nil { + _ = g.conn.Close() + g.conn = nil + } else { + _, err = g.conn.Write([]byte{0}) // message delimiter + if err != nil { + _ = g.conn.Close() + g.conn = nil + } } - _, err = conn.Write(b) return err } type Graylog struct { - Servers []string `toml:"servers"` - ShortMessageField string `toml:"short_message_field"` - writer io.Writer + Servers []string `toml:"servers"` + ShortMessageField string `toml:"short_message_field"` + Timeout config.Duration `toml:"timeout"` + + writer io.Writer + closers []io.WriteCloser } var sampleConfig = ` - ## UDP endpoint for your graylog instance. - servers = ["127.0.0.1:12201"] + ## Endpoints for your graylog instances. 
+ servers = ["udp://127.0.0.1:12201"] + + ## Connection timeout. + # timeout = "5s" ## The field to use as the GELF short_message, if unset the static string ## "telegraf" will be used. @@ -166,14 +264,16 @@ var sampleConfig = ` func (g *Graylog) Connect() error { writers := []io.Writer{} + dialer := net.Dialer{Timeout: time.Duration(g.Timeout)} if len(g.Servers) == 0 { g.Servers = append(g.Servers, "localhost:12201") } for _, server := range g.Servers { - w := NewGelfWriter(GelfConfig{GraylogEndpoint: server}) + w := newGelfWriter(gelfConfig{GraylogEndpoint: server}, &dialer) writers = append(writers, w) + g.closers = append(g.closers, w) } g.writer = io.MultiWriter(writers...) @@ -181,6 +281,9 @@ func (g *Graylog) Connect() error { } func (g *Graylog) Close() error { + for _, closer := range g.closers { + _ = closer.Close() + } return nil } @@ -253,6 +356,8 @@ func (g *Graylog) serialize(metric telegraf.Metric) ([]string, error) { func init() { outputs.Add("graylog", func() telegraf.Output { - return &Graylog{} + return &Graylog{ + Timeout: config.Duration(defaultTimeout), + } }) } diff --git a/plugins/outputs/graylog/graylog_test.go b/plugins/outputs/graylog/graylog_test.go index 37816a7a2c4b3..faa5b34b908d7 100644 --- a/plugins/outputs/graylog/graylog_test.go +++ b/plugins/outputs/graylog/graylog_test.go @@ -11,9 +11,22 @@ import ( "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -func TestWrite(t *testing.T) { +func TestWriteDefault(t *testing.T) { + scenarioUDP(t, "127.0.0.1:12201") +} + +func TestWriteUDP(t *testing.T) { + scenarioUDP(t, "udp://127.0.0.1:12201") +} + +func TestWriteTCP(t *testing.T) { + scenarioTCP(t, "tcp://127.0.0.1:12201") +} + +func scenarioUDP(t *testing.T, server string) { var wg sync.WaitGroup var wg2 sync.WaitGroup wg.Add(1) @@ -22,13 +35,62 @@ func TestWrite(t *testing.T) { wg2.Wait() i := Graylog{ - Servers: []string{"127.0.0.1:12201"}, + Servers: 
[]string{server}, + } + err := i.Connect() + require.NoError(t, err) + + metrics := testutil.MockMetrics() + + // UDP scenario: + // 4 messages are send + + err = i.Write(metrics) + require.NoError(t, err) + err = i.Write(metrics) + require.NoError(t, err) + err = i.Write(metrics) + require.NoError(t, err) + err = i.Write(metrics) + require.NoError(t, err) + + wg.Wait() + i.Close() +} + +func scenarioTCP(t *testing.T, server string) { + var wg sync.WaitGroup + var wg2 sync.WaitGroup + var wg3 sync.WaitGroup + wg.Add(1) + wg2.Add(1) + wg3.Add(1) + go TCPServer(t, &wg, &wg2, &wg3) + wg2.Wait() + + i := Graylog{ + Servers: []string{server}, } - i.Connect() + err := i.Connect() + require.NoError(t, err) metrics := testutil.MockMetrics() - i.Write(metrics) + // TCP scenario: + // 4 messages are send + // -> connection gets broken after the 2nd message (server closes connection) + // -> the 3rd write ends with error + // -> in the 4th write connection is restored and write is successful + + err = i.Write(metrics) + require.NoError(t, err) + err = i.Write(metrics) + require.NoError(t, err) + wg3.Wait() + err = i.Write(metrics) + require.Error(t, err) + err = i.Write(metrics) + require.NoError(t, err) wg.Wait() i.Close() @@ -37,22 +99,88 @@ func TestWrite(t *testing.T) { type GelfObject map[string]interface{} func UDPServer(t *testing.T, wg *sync.WaitGroup, wg2 *sync.WaitGroup) { - serverAddr, _ := net.ResolveUDPAddr("udp", "127.0.0.1:12201") - udpServer, _ := net.ListenUDP("udp", serverAddr) + serverAddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:12201") + require.NoError(t, err) + udpServer, err := net.ListenUDP("udp", serverAddr) + require.NoError(t, err) + defer udpServer.Close() defer wg.Done() bufR := make([]byte, 1024) wg2.Done() - n, _, _ := udpServer.ReadFromUDP(bufR) - b := bytes.NewReader(bufR[0:n]) - r, _ := zlib.NewReader(b) + recv := func() { + n, _, err := udpServer.ReadFromUDP(bufR) + require.NoError(t, err) + + b := bytes.NewReader(bufR[0:n]) + r, _ := 
zlib.NewReader(b) + + bufW := bytes.NewBuffer(nil) + _, _ = io.Copy(bufW, r) + _ = r.Close() + + var obj GelfObject + _ = json.Unmarshal(bufW.Bytes(), &obj) + require.NoError(t, err) + assert.Equal(t, obj["_value"], float64(1)) + } + + // in UDP scenario all 4 messages are received + + recv() + recv() + recv() + recv() +} + +func TCPServer(t *testing.T, wg *sync.WaitGroup, wg2 *sync.WaitGroup, wg3 *sync.WaitGroup) { + serverAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:12201") + require.NoError(t, err) + tcpServer, err := net.ListenTCP("tcp", serverAddr) + require.NoError(t, err) + defer tcpServer.Close() + defer wg.Done() + bufR := make([]byte, 1) bufW := bytes.NewBuffer(nil) - io.Copy(bufW, r) - r.Close() + wg2.Done() + + accept := func() *net.TCPConn { + conn, err := tcpServer.AcceptTCP() + require.NoError(t, err) + _ = conn.SetLinger(0) + return conn + } + conn := accept() + defer conn.Close() + + recv := func() { + bufW.Reset() + for { + n, err := conn.Read(bufR) + require.NoError(t, err) + if n > 0 { + if bufR[0] == 0 { // message delimiter found + break + } + _, _ = bufW.Write(bufR) + } + } + + var obj GelfObject + err = json.Unmarshal(bufW.Bytes(), &obj) + require.NoError(t, err) + assert.Equal(t, obj["_value"], float64(1)) + } + + // in TCP scenario only 3 messages are received (1st, 2dn and 4th) due to connection break after the 2nd - var obj GelfObject - json.Unmarshal(bufW.Bytes(), &obj) - assert.Equal(t, obj["_value"], float64(1)) + recv() + recv() + _ = conn.Close() + wg3.Done() + conn = accept() + defer conn.Close() + recv() } From 3eebfd2f0fd4d1768936ae98f601c827f6a271a2 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Tue, 21 Sep 2021 15:51:43 -0600 Subject: [PATCH 080/176] feat: add Linux Volume Manager input plugin (#9771) --- config/config.go | 2 +- plugins/inputs/all/all.go | 1 + plugins/inputs/lvm/README.md | 77 +++++++++ plugins/inputs/lvm/lvm.go | 293 +++++++++++++++++++++++++++++++++ plugins/inputs/lvm/lvm_test.go | 211 
++++++++++++++++++++++++ 5 files changed, 583 insertions(+), 1 deletion(-) create mode 100644 plugins/inputs/lvm/README.md create mode 100644 plugins/inputs/lvm/lvm.go create mode 100644 plugins/inputs/lvm/lvm_test.go diff --git a/config/config.go b/config/config.go index b6eed9446162f..e64d893bc05ea 100644 --- a/config/config.go +++ b/config/config.go @@ -1571,7 +1571,7 @@ func (c *Config) missingTomlField(_ reflect.Type, key string) error { "grok_timezone", "grok_unique_timestamp", "influx_max_line_bytes", "influx_sort_fields", "influx_uint_support", "interval", "json_name_key", "json_query", "json_strict", "json_string_fields", "json_time_format", "json_time_key", "json_timestamp_format", "json_timestamp_units", "json_timezone", "json_v2", - "metric_batch_size", "metric_buffer_limit", "name_override", "name_prefix", + "lvm", "metric_batch_size", "metric_buffer_limit", "name_override", "name_prefix", "name_suffix", "namedrop", "namepass", "order", "pass", "period", "precision", "prefix", "prometheus_export_timestamp", "prometheus_sort_metrics", "prometheus_string_as_label", "separator", "splunkmetric_hec_routing", "splunkmetric_multimetric", "tag_keys", diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 60a52903ef079..690df0d3b0e46 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -100,6 +100,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/logparser" _ "github.com/influxdata/telegraf/plugins/inputs/logstash" _ "github.com/influxdata/telegraf/plugins/inputs/lustre2" + _ "github.com/influxdata/telegraf/plugins/inputs/lvm" _ "github.com/influxdata/telegraf/plugins/inputs/mailchimp" _ "github.com/influxdata/telegraf/plugins/inputs/marklogic" _ "github.com/influxdata/telegraf/plugins/inputs/mcrouter" diff --git a/plugins/inputs/lvm/README.md b/plugins/inputs/lvm/README.md new file mode 100644 index 0000000000000..c0ce1a2e6008a --- /dev/null +++ b/plugins/inputs/lvm/README.md @@ -0,0 +1,77 @@ +# LVM Input 
Plugin + +The Logical Volume Management (LVM) input plugin collects information about +physical volumes, volume groups, and logical volumes. + +### Configuration + +The `lvm` command requires elevated permissions. If the user has configured +sudo with the ability to run these commands, then set the `use_sudo` to true. + +```toml +# Read metrics about LVM physical volumes, volume groups, logical volumes. +[[inputs.lvm]] + ## Use sudo to run LVM commands + use_sudo = false +``` + +#### Using sudo + +If your account does not already have the ability to run commands +with passwordless sudo then updates to the sudoers file are required. Below +is an example to allow the requires LVM commands: + +First, use the `visudo` command to start editing the sudoers file. Then add +the following content, where `` is the username of the user that +needs this access: + +```text +Cmnd_Alias LVM = /usr/sbin/pvs *, /usr/sbin/vgs *, /usr/sbin/lvs * + ALL=(root) NOPASSWD: LVM +Defaults!LVM !logfile, !syslog, !pam_session +``` + +### Metrics + +Metrics are broken out by physical volume (pv), volume group (vg), and logical +volume (lv): + +- lvm_physical_vol + - tags + - path + - vol_group + - fields + - size + - free + - used + - used_percent +- lvm_vol_group + - tags + - name + - fields + - size + - free + - used_percent + - physical_volume_count + - logical_volume_count + - snapshot_count +- lvm_logical_vol + - tags + - name + - vol_group + - fields + - size + - data_percent + - meta_percent + +### Example Output + +The following example shows a system with the root partition on an LVM group +as well as with a Docker thin-provisioned LVM group on a second drive: + +> lvm_physical_vol,path=/dev/sda2,vol_group=vgroot free=0i,size=249510756352i,used=249510756352i,used_percent=100 1631823026000000000 +> lvm_physical_vol,path=/dev/sdb,vol_group=docker free=3858759680i,size=128316342272i,used=124457582592i,used_percent=96.99277612525741 1631823026000000000 +> lvm_vol_group,name=vgroot 
free=0i,logical_volume_count=1i,physical_volume_count=1i,size=249510756352i,snapshot_count=0i,used_percent=100 1631823026000000000 +> lvm_vol_group,name=docker free=3858759680i,logical_volume_count=1i,physical_volume_count=1i,size=128316342272i,snapshot_count=0i,used_percent=96.99277612525741 1631823026000000000 +> lvm_logical_vol,name=lvroot,vol_group=vgroot data_percent=0,metadata_percent=0,size=249510756352i 1631823026000000000 +> lvm_logical_vol,name=thinpool,vol_group=docker data_percent=0.36000001430511475,metadata_percent=1.3300000429153442,size=121899057152i 1631823026000000000 diff --git a/plugins/inputs/lvm/lvm.go b/plugins/inputs/lvm/lvm.go new file mode 100644 index 0000000000000..ce46af8a31c04 --- /dev/null +++ b/plugins/inputs/lvm/lvm.go @@ -0,0 +1,293 @@ +package lvm + +import ( + "encoding/json" + "fmt" + "os/exec" + "strconv" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" +) + +var ( + execCommand = exec.Command +) + +var sampleConfig = ` +## Use sudo to run LVM commands +use_sudo = false +` + +type LVM struct { + UseSudo bool `toml:"use_sudo"` +} + +func (lvm *LVM) Description() string { + return "Read metrics about LVM physical volumes, volume groups, logical volumes." 
+} + +func (lvm *LVM) SampleConfig() string { + return sampleConfig +} + +func (lvm *LVM) Init() error { + return nil +} + +func (lvm *LVM) Gather(acc telegraf.Accumulator) error { + if err := lvm.gatherPhysicalVolumes(acc); err != nil { + return err + } else if err := lvm.gatherVolumeGroups(acc); err != nil { + return err + } else if err := lvm.gatherLogicalVolumes(acc); err != nil { + return err + } + + return nil +} + +func (lvm *LVM) gatherPhysicalVolumes(acc telegraf.Accumulator) error { + pvsCmd := "/usr/sbin/pvs" + args := []string{ + "--reportformat", "json", "--units", "b", "--nosuffix", + "-o", "pv_name,vg_name,pv_size,pv_free,pv_used", + } + out, err := lvm.runCmd(pvsCmd, args) + if err != nil { + return err + } + + var report pvsReport + err = json.Unmarshal(out, &report) + if err != nil { + return fmt.Errorf("failed to unmarshal physical volume JSON: %s", err) + } + + if len(report.Report) > 0 { + for _, pv := range report.Report[0].Pv { + tags := map[string]string{ + "path": pv.Name, + "vol_group": pv.VolGroup, + } + + size, err := strconv.ParseUint(pv.Size, 10, 64) + if err != nil { + return err + } + + free, err := strconv.ParseUint(pv.Free, 10, 64) + if err != nil { + return err + } + + used, err := strconv.ParseUint(pv.Used, 10, 64) + if err != nil { + return err + } + + usedPercent := float64(used) / float64(size) * 100 + + fields := map[string]interface{}{ + "size": size, + "free": free, + "used": used, + "used_percent": usedPercent, + } + + acc.AddFields("lvm_physical_vol", fields, tags) + } + } + + return nil +} + +func (lvm *LVM) gatherVolumeGroups(acc telegraf.Accumulator) error { + cmd := "/usr/sbin/vgs" + args := []string{ + "--reportformat", "json", "--units", "b", "--nosuffix", + "-o", "vg_name,pv_count,lv_count,snap_count,vg_size,vg_free", + } + out, err := lvm.runCmd(cmd, args) + if err != nil { + return err + } + + var report vgsReport + err = json.Unmarshal(out, &report) + if err != nil { + return fmt.Errorf("failed to unmarshal vol 
group JSON: %s", err) + } + + if len(report.Report) > 0 { + for _, vg := range report.Report[0].Vg { + tags := map[string]string{ + "name": vg.Name, + } + + size, err := strconv.ParseUint(vg.Size, 10, 64) + if err != nil { + return err + } + + free, err := strconv.ParseUint(vg.Free, 10, 64) + if err != nil { + return err + } + + pvCount, err := strconv.ParseUint(vg.PvCount, 10, 64) + if err != nil { + return err + } + lvCount, err := strconv.ParseUint(vg.LvCount, 10, 64) + if err != nil { + return err + } + snapCount, err := strconv.ParseUint(vg.SnapCount, 10, 64) + if err != nil { + return err + } + + usedPercent := (float64(size) - float64(free)) / float64(size) * 100 + + fields := map[string]interface{}{ + "size": size, + "free": free, + "used_percent": usedPercent, + "physical_volume_count": pvCount, + "logical_volume_count": lvCount, + "snapshot_count": snapCount, + } + + acc.AddFields("lvm_vol_group", fields, tags) + } + } + + return nil +} + +func (lvm *LVM) gatherLogicalVolumes(acc telegraf.Accumulator) error { + cmd := "/usr/sbin/lvs" + args := []string{ + "--reportformat", "json", "--units", "b", "--nosuffix", + "-o", "lv_name,vg_name,lv_size,data_percent,metadata_percent", + } + out, err := lvm.runCmd(cmd, args) + if err != nil { + return err + } + + var report lvsReport + err = json.Unmarshal(out, &report) + if err != nil { + return fmt.Errorf("failed to unmarshal logical vol JSON: %s", err) + } + + if len(report.Report) > 0 { + for _, lv := range report.Report[0].Lv { + tags := map[string]string{ + "name": lv.Name, + "vol_group": lv.VolGroup, + } + + size, err := strconv.ParseUint(lv.Size, 10, 64) + if err != nil { + return err + } + + // Does not apply to all logical volumes, set default value + if lv.DataPercent == "" { + lv.DataPercent = "0.0" + } + dataPercent, err := strconv.ParseFloat(lv.DataPercent, 32) + if err != nil { + return err + } + + // Does not apply to all logical volumes, set default value + if lv.MetadataPercent == "" { + 
lv.MetadataPercent = "0.0" + } + metadataPercent, err := strconv.ParseFloat(lv.MetadataPercent, 32) + if err != nil { + return err + } + + fields := map[string]interface{}{ + "size": size, + "data_percent": dataPercent, + "metadata_percent": metadataPercent, + } + + acc.AddFields("lvm_logical_vol", fields, tags) + } + } + + return nil +} + +func (lvm *LVM) runCmd(cmd string, args []string) ([]byte, error) { + execCmd := execCommand(cmd, args...) + if lvm.UseSudo { + execCmd = execCommand("sudo", append([]string{"-n", cmd}, args...)...) + } + + out, err := internal.StdOutputTimeout(execCmd, 5*time.Second) + if err != nil { + return nil, fmt.Errorf( + "failed to run command %s: %s - %s", + strings.Join(execCmd.Args, " "), err, string(out), + ) + } + + return out, nil +} + +// Represents info about physical volume command, pvs, output +type pvsReport struct { + Report []struct { + Pv []struct { + Name string `json:"pv_name"` + VolGroup string `json:"vg_name"` + Size string `json:"pv_size"` + Free string `json:"pv_free"` + Used string `json:"pv_used"` + } `json:"pv"` + } `json:"report"` +} + +// Represents info about volume group command, vgs, output +type vgsReport struct { + Report []struct { + Vg []struct { + Name string `json:"vg_name"` + Size string `json:"vg_size"` + Free string `json:"vg_free"` + LvCount string `json:"lv_count"` + PvCount string `json:"pv_count"` + SnapCount string `json:"snap_count"` + } `json:"vg"` + } `json:"report"` +} + +// Represents info about logical volume command, lvs, output +type lvsReport struct { + Report []struct { + Lv []struct { + Name string `json:"lv_name"` + VolGroup string `json:"vg_name"` + Size string `json:"lv_size"` + DataPercent string `json:"data_percent"` + MetadataPercent string `json:"metadata_percent"` + } `json:"lv"` + } `json:"report"` +} + +func init() { + inputs.Add("lvm", func() telegraf.Input { + return &LVM{} + }) +} diff --git a/plugins/inputs/lvm/lvm_test.go b/plugins/inputs/lvm/lvm_test.go new file mode 
100644 index 0000000000000..c48eff5c039b1 --- /dev/null +++ b/plugins/inputs/lvm/lvm_test.go @@ -0,0 +1,211 @@ +package lvm + +import ( + "fmt" + "os" + "os/exec" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestGather(t *testing.T) { + var lvm LVM = LVM{UseSudo: false} + var acc testutil.Accumulator + + // overwriting exec commands with mock commands + execCommand = fakeExecCommand + err := lvm.Gather(&acc) + require.NoError(t, err) + + pvsTags := map[string]string{ + "path": "/dev/sdb", + "vol_group": "docker", + } + pvsFields := map[string]interface{}{ + "size": uint64(128316342272), + "free": uint64(3858759680), + "used": uint64(124457582592), + "used_percent": 96.99277612525741, + } + acc.AssertContainsTaggedFields(t, "lvm_physical_vol", pvsFields, pvsTags) + + vgsTags := map[string]string{ + "name": "docker", + } + vgsFields := map[string]interface{}{ + "size": uint64(128316342272), + "free": uint64(3858759680), + "used_percent": 96.99277612525741, + "physical_volume_count": uint64(1), + "logical_volume_count": uint64(1), + "snapshot_count": uint64(0), + } + acc.AssertContainsTaggedFields(t, "lvm_vol_group", vgsFields, vgsTags) + + lvsTags := map[string]string{ + "name": "thinpool", + "vol_group": "docker", + } + lvsFields := map[string]interface{}{ + "size": uint64(121899057152), + "data_percent": 0.36000001430511475, + "metadata_percent": 1.3300000429153442, + } + acc.AssertContainsTaggedFields(t, "lvm_logical_vol", lvsFields, lvsTags) +} + +// Used as a helper function that mock the exec.Command call +func fakeExecCommand(command string, args ...string) *exec.Cmd { + cs := []string{"-test.run=TestHelperProcess", "--", command} + cs = append(cs, args...) + cmd := exec.Command(os.Args[0], cs...) 
+ cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"} + return cmd +} + +// Used to mock exec.Command output +func TestHelperProcess(_ *testing.T) { + if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { + return + } + + mockPVSData := `{ + "report": [ + { + "pv": [ + {"pv_name":"/dev/sdb", "vg_name":"docker", "pv_size":"128316342272", "pv_free":"3858759680", "pv_used":"124457582592"} + ] + } + ] + } +` + + mockVGSData := `{ + "report": [ + { + "vg": [ + {"vg_name":"docker", "pv_count":"1", "lv_count":"1", "snap_count":"0", "vg_size":"128316342272", "vg_free":"3858759680"} + ] + } + ] + } +` + + mockLVSData := `{ + "report": [ + { + "lv": [ + {"lv_name":"thinpool", "vg_name":"docker", "lv_size":"121899057152", "data_percent":"0.36", "metadata_percent":"1.33"} + ] + } + ] + } +` + + // Previous arguments are tests stuff, that looks like : + // /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess -- + args := os.Args + cmd := args[3] + if cmd == "/usr/sbin/pvs" { + //nolint:errcheck,revive // test will fail anyway + fmt.Fprint(os.Stdout, mockPVSData) + } else if cmd == "/usr/sbin/vgs" { + //nolint:errcheck,revive // test will fail anyway + fmt.Fprint(os.Stdout, mockVGSData) + } else if cmd == "/usr/sbin/lvs" { + //nolint:errcheck,revive // test will fail anyway + fmt.Fprint(os.Stdout, mockLVSData) + } else { + //nolint:errcheck,revive // test will fail anyway + fmt.Fprint(os.Stdout, "command not found") + //nolint:revive // error code is important for this "test" + os.Exit(1) + } + //nolint:revive // error code is important for this "test" + os.Exit(0) +} + +// test when no lvm devices exist +func TestGatherNoLVM(t *testing.T) { + var noLVM LVM = LVM{UseSudo: false} + var acc testutil.Accumulator + + // overwriting exec commands with mock commands + execCommand = fakeExecCommandNoLVM + err := noLVM.Gather(&acc) + require.NoError(t, err) + + acc.AssertDoesNotContainMeasurement(t, "lvm_physical_vol") + acc.AssertDoesNotContainMeasurement(t, 
"lvm_vol_group") + acc.AssertDoesNotContainMeasurement(t, "lvm_logical_vol") +} + +// Used as a helper function that mock the exec.Command call +func fakeExecCommandNoLVM(command string, args ...string) *exec.Cmd { + cs := []string{"-test.run=TestHelperProcessNoLVM", "--", command} + cs = append(cs, args...) + cmd := exec.Command(os.Args[0], cs...) + cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"} + return cmd +} + +// Used to mock exec.Command output +func TestHelperProcessNoLVM(_ *testing.T) { + if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { + return + } + + mockPVSData := `{ + "report": [ + { + "pv": [ + ] + } + ] + } +` + + mockVGSData := `{ + "report": [ + { + "vg": [ + ] + } + ] + } +` + + mockLVSData := `{ + "report": [ + { + "lv": [ + ] + } + ] + } +` + + // Previous arguments are tests stuff, that looks like : + // /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess -- + args := os.Args + cmd := args[3] + if cmd == "/usr/sbin/pvs" { + //nolint:errcheck,revive // test will fail anyway + fmt.Fprint(os.Stdout, mockPVSData) + } else if cmd == "/usr/sbin/vgs" { + //nolint:errcheck,revive // test will fail anyway + fmt.Fprint(os.Stdout, mockVGSData) + } else if cmd == "/usr/sbin/lvs" { + //nolint:errcheck,revive // test will fail anyway + fmt.Fprint(os.Stdout, mockLVSData) + } else { + //nolint:errcheck,revive // test will fail anyway + fmt.Fprint(os.Stdout, "command not found") + //nolint:revive // error code is important for this "test" + os.Exit(1) + } + //nolint:revive // error code is important for this "test" + os.Exit(0) +} From 86a6c06955d6e3197ff39355293ec7cfb4c3d54d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 Sep 2021 08:00:13 -0600 Subject: [PATCH 081/176] fix: bump github.com/Azure/go-autorest/autorest/adal (#9791) --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index d56ac810d6026..8e7e48099934e 
100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest v0.11.18 - github.com/Azure/go-autorest/autorest/adal v0.9.15 + github.com/Azure/go-autorest/autorest/adal v0.9.16 github.com/Azure/go-autorest/autorest/azure/auth v0.5.8 github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect diff --git a/go.sum b/go.sum index c67071b110a16..e8ba0d9ef6cc6 100644 --- a/go.sum +++ b/go.sum @@ -111,6 +111,8 @@ github.com/Azure/go-autorest/autorest/adal v0.9.11/go.mod h1:nBKAnTomx8gDtl+3ZCJ github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/adal v0.9.15 h1:X+p2GF0GWyOiSmqohIaEeuNFNDY4I4EOlVuUQvFdWMk= github.com/Azure/go-autorest/autorest/adal v0.9.15/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= +github.com/Azure/go-autorest/autorest/adal v0.9.16 h1:P8An8Z9rH1ldbOLdFpxYorgOt2sywL9V24dAwWHPuGc= +github.com/Azure/go-autorest/autorest/adal v0.9.16/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= github.com/Azure/go-autorest/autorest/azure/auth v0.5.8 h1:TzPg6B6fTZ0G1zBf3T54aI7p3cAT6u//TOXGPmFMOXg= github.com/Azure/go-autorest/autorest/azure/auth v0.5.8/go.mod h1:kxyKZTSfKh8OVFWPAgOgQ/frrJgeYQJPyR5fLFmXko4= From 4cee2ca15d80ffe16bc5c861adfb7710f158b03a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 Sep 2021 08:20:37 -0600 Subject: [PATCH 082/176] fix: bump github.com/testcontainers/testcontainers-go from 0.11.0 to 0.11.1 (#9789) --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 
8e7e48099934e..a569c672a4eb4 100644 --- a/go.mod +++ b/go.mod @@ -82,7 +82,7 @@ require ( github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect github.com/dimchansky/utfbom v1.1.1 github.com/docker/distribution v2.7.1+incompatible // indirect - github.com/docker/docker v20.10.6+incompatible + github.com/docker/docker v20.10.7+incompatible github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.4.0 // indirect github.com/doclambda/protobufquery v0.0.0-20210317203640-88ffabe06a60 @@ -245,7 +245,7 @@ require ( github.com/stretchr/objx v0.2.0 // indirect github.com/stretchr/testify v1.7.0 github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 - github.com/testcontainers/testcontainers-go v0.11.0 + github.com/testcontainers/testcontainers-go v0.11.1 github.com/tidwall/gjson v1.8.0 github.com/tidwall/match v1.0.3 // indirect github.com/tidwall/pretty v1.1.0 // indirect diff --git a/go.sum b/go.sum index e8ba0d9ef6cc6..6cc78b3768f13 100644 --- a/go.sum +++ b/go.sum @@ -499,8 +499,8 @@ github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.6+incompatible h1:oXI3Vas8TI8Eu/EjH4srKHJBVqraSzJybhxY7Om9faQ= -github.com/docker/docker v20.10.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.7+incompatible h1:Z6O9Nhsjv+ayUEeI1IojKbYcsGdgYSNqxe1s2MYzUhQ= +github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0 
h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= @@ -1533,8 +1533,8 @@ github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 h1:Oj2e7Sae4XrOs github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod h1:qUzPVlSj2UgxJkVbH0ZwuuiR46U8RBMDT5KLY78Ifpw= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= -github.com/testcontainers/testcontainers-go v0.11.0 h1:HO5YOx2DYBHqcg4MzVWPj3FuHAv7USWVu94vCSsgiaM= -github.com/testcontainers/testcontainers-go v0.11.0/go.mod h1:HztBCODzuA+YpMXGK8amjO8j50jz2gcT0BOzSKUiYIs= +github.com/testcontainers/testcontainers-go v0.11.1 h1:FiYsB83LSGbiawoV8TpAZGfcCUbtaeeg1SXqEKUxh08= +github.com/testcontainers/testcontainers-go v0.11.1/go.mod h1:/V0UVq+1e7NWYoqTPog179clf0Qp9TOyp4EcXaEFQz8= github.com/tidwall/gjson v1.8.0 h1:Qt+orfosKn0rbNTZqHYDqBrmm3UDA4KRkv70fDzG+PQ= github.com/tidwall/gjson v1.8.0/go.mod h1:5/xDoumyyDNerp2U36lyolv46b3uF/9Bu6OfyQ9GImk= github.com/tidwall/match v1.0.3 h1:FQUVvBImDutD8wJLN6c5eMzWtjgONK9MwIBCOrUJKeE= From 20ed68c36088941ebd608ef7405567ec764f54da Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 22 Sep 2021 08:36:04 -0600 Subject: [PATCH 083/176] fix: run go mod tidy --- go.sum | 2 -- 1 file changed, 2 deletions(-) diff --git a/go.sum b/go.sum index 6cc78b3768f13..300b12d8d6a6c 100644 --- a/go.sum +++ b/go.sum @@ -109,8 +109,6 @@ github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35pe github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/adal v0.9.11/go.mod h1:nBKAnTomx8gDtl+3ZCJv2v0KACFHWTB2drffI1B68Pk= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= 
-github.com/Azure/go-autorest/autorest/adal v0.9.15 h1:X+p2GF0GWyOiSmqohIaEeuNFNDY4I4EOlVuUQvFdWMk= -github.com/Azure/go-autorest/autorest/adal v0.9.15/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= github.com/Azure/go-autorest/autorest/adal v0.9.16 h1:P8An8Z9rH1ldbOLdFpxYorgOt2sywL9V24dAwWHPuGc= github.com/Azure/go-autorest/autorest/adal v0.9.16/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= From 045adcb700ebc55761e5876a07de82f9317e4056 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Wed, 22 Sep 2021 08:54:59 -0700 Subject: [PATCH 084/176] fix: nightly upload requires package steps (#9795) --- .circleci/config.yml | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index dc59d4aa13cdb..027a529cb0385 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -577,7 +577,20 @@ workflows: - *static-package - *mipsel-package - *mips-package - - nightly + - nightly: + requires: + - 'i386-package' + - 'ppc64le-package' + - 's390x-package' + - 'armel-package' + - 'amd64-package' + - 'mipsel-package' + - 'mips-package' + - 'darwin-package' + - 'windows-package' + - 'static-package' + - 'arm64-package' + - 'armhf-package' triggers: - schedule: cron: "0 7 * * *" From ceae37d66ecaf949a5813847ad4b695e2a936c3e Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Thu, 23 Sep 2021 16:10:44 +0200 Subject: [PATCH 085/176] fix: Rename KNXListener to knx_listener (#9741) --- etc/telegraf.conf | 15 +++++++-------- plugins/inputs/knx_listener/README.md | 6 +++--- plugins/inputs/knx_listener/knx_listener.go | 6 ++++-- 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 0ed5ba8ebffba..2892d99142be5 100644 --- a/etc/telegraf.conf +++ 
b/etc/telegraf.conf @@ -5546,7 +5546,7 @@ # # timeout = "5ms" -# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver +# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver # [[inputs.opensmtpd]] # ## If running as a restricted user you can prepend sudo for additional access: # #use_sudo = false @@ -6802,7 +6802,7 @@ # # Listener capable of handling KNX bus messages provided through a KNX-IP Interface. -# [[inputs.KNXListener]] +# [[inputs.knx_listener]] # ## Type of KNX-IP interface. # ## Can be either "tunnel" or "router". # # service_type = "tunnel" @@ -6811,7 +6811,7 @@ # service_address = "localhost:3671" # # ## Measurement definition(s) -# # [[inputs.KNXListener.measurement]] +# # [[inputs.knx_listener.measurement]] # # ## Name of the measurement # # name = "temperature" # # ## Datapoint-Type (DPT) of the KNX messages @@ -6819,7 +6819,7 @@ # # ## List of Group-Addresses (GAs) assigned to the measurement # # addresses = ["5/5/1"] # -# # [[inputs.KNXListener.measurement]] +# # [[inputs.knx_listener.measurement]] # # name = "illumination" # # dpt = "9.004" # # addresses = ["5/5/3"] @@ -7667,7 +7667,7 @@ # ## This value is propagated to pqos tool. Interval format is defined by pqos itself. # ## If not provided or provided 0, will be set to 10 = 10x100ms = 1s. # # sampling_interval = "10" -# +# # ## Optionally specify the path to pqos executable. # ## If not provided, auto discovery will be performed. # # pqos_path = "/usr/local/bin/pqos" @@ -7675,12 +7675,12 @@ # ## Optionally specify if IPC and LLC_Misses metrics shouldn't be propagated. # ## If not provided, default value is false. # # shortened_metrics = false -# +# # ## Specify the list of groups of CPU core(s) to be provided as pqos input. # ## Mandatory if processes aren't set and forbidden if processes are specified. # ## e.g. 
["0-3", "4,5,6"] or ["1-3,4"] # # cores = ["0-3"] -# +# # ## Specify the list of processes for which Metrics will be collected. # ## Mandatory if cores aren't set and forbidden if cores are specified. # ## e.g. ["qemu", "pmd"] @@ -9099,4 +9099,3 @@ # [[inputs.zipkin]] # # path = "/api/v1/spans" # URL path for span data # # port = 9411 # Port on which Telegraf listens - diff --git a/plugins/inputs/knx_listener/README.md b/plugins/inputs/knx_listener/README.md index 7a06462ffbb3e..518dd5d7f3720 100644 --- a/plugins/inputs/knx_listener/README.md +++ b/plugins/inputs/knx_listener/README.md @@ -11,7 +11,7 @@ This is a sample config for the plugin. ```toml # Listener capable of handling KNX bus messages provided through a KNX-IP Interface. -[[inputs.KNXListener]] +[[inputs.knx_listener]] ## Type of KNX-IP interface. ## Can be either "tunnel" or "router". # service_type = "tunnel" @@ -20,7 +20,7 @@ This is a sample config for the plugin. service_address = "localhost:3671" ## Measurement definition(s) - # [[inputs.KNXListener.measurement]] + # [[inputs.knx_listener.measurement]] # ## Name of the measurement # name = "temperature" # ## Datapoint-Type (DPT) of the KNX messages @@ -28,7 +28,7 @@ This is a sample config for the plugin. 
# ## List of Group-Addresses (GAs) assigned to the measurement # addresses = ["5/5/1"] - # [[inputs.KNXListener.measurement]] + # [[inputs.knx_listener.measurement]] # name = "illumination" # dpt = "9.004" # addresses = ["5/5/3"] diff --git a/plugins/inputs/knx_listener/knx_listener.go b/plugins/inputs/knx_listener/knx_listener.go index 98f19e922f7ad..3896d649b4055 100644 --- a/plugins/inputs/knx_listener/knx_listener.go +++ b/plugins/inputs/knx_listener/knx_listener.go @@ -56,7 +56,7 @@ func (kl *KNXListener) SampleConfig() string { service_address = "localhost:3671" ## Measurement definition(s) - # [[inputs.KNXListener.measurement]] + # [[inputs.knx_listener.measurement]] # ## Name of the measurement # name = "temperature" # ## Datapoint-Type (DPT) of the KNX messages @@ -64,7 +64,7 @@ func (kl *KNXListener) SampleConfig() string { # ## List of Group-Addresses (GAs) assigned to the measurement # addresses = ["5/5/1"] - # [[inputs.KNXListener.measurement]] + # [[inputs.knx_listener.measurement]] # name = "illumination" # dpt = "9.004" # addresses = ["5/5/3"] @@ -195,5 +195,7 @@ func (kl *KNXListener) listen() { } func init() { + inputs.Add("knx_listener", func() telegraf.Input { return &KNXListener{ServiceType: "tunnel"} }) + // Register for backward compatibility inputs.Add("KNXListener", func() telegraf.Input { return &KNXListener{ServiceType: "tunnel"} }) } From fb088bd69c86c6628dfdc5a44f9e0d878587f6e3 Mon Sep 17 00:00:00 2001 From: Jacob Marble Date: Thu, 23 Sep 2021 09:05:29 -0700 Subject: [PATCH 086/176] fix: error returned to OpenTelemetry client (#9797) --- go.mod | 16 +++- go.sum | 27 +++++- plugins/inputs/opentelemetry/grpc_services.go | 2 +- plugins/inputs/opentelemetry/opentelemetry.go | 11 ++- .../opentelemetry/opentelemetry_test.go | 83 +++++++++++++++++++ 5 files changed, 131 insertions(+), 8 deletions(-) create mode 100644 plugins/inputs/opentelemetry/opentelemetry_test.go diff --git a/go.mod b/go.mod index a569c672a4eb4..dc8b762d1e6d1 100644 --- 
a/go.mod +++ b/go.mod @@ -61,7 +61,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/sso v1.1.5 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.2.2 // indirect github.com/aws/smithy-go v1.8.0 - github.com/benbjohnson/clock v1.0.3 + github.com/benbjohnson/clock v1.1.0 github.com/beorn7/perks v1.0.1 // indirect github.com/bitly/go-hostpool v0.1.0 // indirect github.com/bmatcuk/doublestar/v3 v3.0.0 @@ -271,6 +271,9 @@ require ( go.mongodb.org/mongo-driver v1.5.3 go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/collector/model v0.35.0 + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.23.0 + go.opentelemetry.io/otel/metric v0.23.0 + go.opentelemetry.io/otel/sdk/metric v0.23.0 go.starlark.net v0.0.0-20210406145628-7a1108eaa012 go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect @@ -329,6 +332,17 @@ require ( sigs.k8s.io/yaml v1.2.0 // indirect ) +require ( + github.com/cenkalti/backoff/v4 v4.1.1 // indirect + go.opentelemetry.io/otel v1.0.0-RC3 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.23.0 // indirect + go.opentelemetry.io/otel/internal/metric v0.23.0 // indirect + go.opentelemetry.io/otel/sdk v1.0.0-RC3 // indirect + go.opentelemetry.io/otel/sdk/export/metric v0.23.0 // indirect + go.opentelemetry.io/otel/trace v1.0.0-RC3 // indirect + go.opentelemetry.io/proto/otlp v0.9.0 // indirect +) + // replaced due to https://github.com/satori/go.uuid/issues/73 replace github.com/satori/go.uuid => github.com/gofrs/uuid v3.2.0+incompatible diff --git a/go.sum b/go.sum index 300b12d8d6a6c..4189b415723f0 100644 --- a/go.sum +++ b/go.sum @@ -291,8 +291,8 @@ github.com/aws/smithy-go v1.0.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB github.com/aws/smithy-go v1.3.1/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aws/smithy-go v1.8.0 h1:AEwwwXQZtUwP5Mz506FeXXrKBe0jA8gVM+1gEcSRooc= github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= 
-github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg= -github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -333,6 +333,8 @@ github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEe github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= +github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= @@ -1639,7 +1641,27 @@ go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/collector/model v0.35.0 h1:NpKjghiqlei4ecwjOYOMhD6tj4gY8yiWHPJmbFs/ArI= go.opentelemetry.io/collector/model v0.35.0/go.mod h1:+7YCSjJG+MqiIFjauzt7oM2qkqBsaJWh5hcsO4fwsAc= +go.opentelemetry.io/otel v1.0.0-RC3 h1:kvwiyEkiUT/JaadXzVLI/R1wDO934A7r3Bs2wEe6wqA= 
+go.opentelemetry.io/otel v1.0.0-RC3/go.mod h1:Ka5j3ua8tZs4Rkq4Ex3hwgBgOchyPVq5S6P2lz//nKQ= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.23.0 h1:vKIEsT6IJU0NYd+iZccjgCmk80zsa7dTiC2Bu7U1jz0= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.23.0/go.mod h1:pe9oOWRaZyapdajWCn64fnl76v3cmTEmNBgh7MkKvwE= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.23.0 h1:JSsJID+KU3G8wxynfHIlWaefOvYngDjnrmtHOGb1sb0= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.23.0/go.mod h1:aSP5oMNaAfOYq+sRydHANZ0vBYLyZR/3lR9pru9aPLk= +go.opentelemetry.io/otel/internal/metric v0.23.0 h1:mPfzm9Iqhw7G2nDBmUAjFTfPqLZPbOW2k7QI57ITbaI= +go.opentelemetry.io/otel/internal/metric v0.23.0/go.mod h1:z+RPiDJe30YnCrOhFGivwBS+DU1JU/PiLKkk4re2DNY= +go.opentelemetry.io/otel/metric v0.23.0 h1:mYCcDxi60P4T27/0jchIDFa1WHEfQeU3zH9UEMpnj2c= +go.opentelemetry.io/otel/metric v0.23.0/go.mod h1:G/Nn9InyNnIv7J6YVkQfpc0JCfKBNJaERBGw08nqmVQ= +go.opentelemetry.io/otel/sdk v1.0.0-RC3 h1:iRMkET+EmJUn5mW0hJzygBraXRmrUwzbOtNvTCh/oKs= +go.opentelemetry.io/otel/sdk v1.0.0-RC3/go.mod h1:78H6hyg2fka0NYT9fqGuFLvly2yCxiBXDJAgLKo/2Us= +go.opentelemetry.io/otel/sdk/export/metric v0.23.0 h1:7NeoKPPx6NdZBVHLEp/LY5Lq85Ff1WNZnuJkuRy+azw= +go.opentelemetry.io/otel/sdk/export/metric v0.23.0/go.mod h1:SuMiREmKVRIwFKq73zvGTvwFpxb/ZAYkMfyqMoOtDqs= +go.opentelemetry.io/otel/sdk/metric v0.23.0 h1:xlZhPbiue1+jjSFEth94q9QCmX8Q24mOtue9IAmlVyI= +go.opentelemetry.io/otel/sdk/metric v0.23.0/go.mod h1:wa0sKK13eeIFW+0OFjcC3S1i7FTRRiLAXe1kjBVbhwg= +go.opentelemetry.io/otel/trace v1.0.0-RC3 h1:9F0ayEvlxv8BmNmPbU005WK7hC+7KbOazCPZjNa1yME= +go.opentelemetry.io/otel/trace v1.0.0-RC3/go.mod h1:VUt2TUYd8S2/ZRX09ZDFZQwn2RqfMB5MzO17jBojGxo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.9.0 h1:C0g6TWmQYvjKRnljRULLWUVJGy8Uvu0NEL/5frY2/t4= +go.opentelemetry.io/proto/otlp v0.9.0/go.mod 
h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg= go.starlark.net v0.0.0-20210406145628-7a1108eaa012 h1:4RGobP/iq7S22H0Bb92OEt+M8/cfBQnW+T+a2MC0sQo= go.starlark.net v0.0.0-20210406145628-7a1108eaa012/go.mod h1:t3mmBBPzAVvK0L0n1drDmrQsJ8FoIx4INCqVMTr/Zo0= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -1944,6 +1966,7 @@ golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/plugins/inputs/opentelemetry/grpc_services.go b/plugins/inputs/opentelemetry/grpc_services.go index f5fa450fa8f65..1c805e2a23ff2 100644 --- a/plugins/inputs/opentelemetry/grpc_services.go +++ b/plugins/inputs/opentelemetry/grpc_services.go @@ -56,7 +56,7 @@ func newMetricsService(logger common.Logger, writer *writeToAccumulator, schema func (s *metricsService) Export(ctx context.Context, req pdata.Metrics) (otlpgrpc.MetricsResponse, error) { err := s.converter.WriteMetrics(ctx, req, s.writer) - return otlpgrpc.MetricsResponse{}, err + return otlpgrpc.NewMetricsResponse(), err } type logsService struct { diff --git a/plugins/inputs/opentelemetry/opentelemetry.go b/plugins/inputs/opentelemetry/opentelemetry.go index 2e6cbf9b8349a..85f32a7695efa 100644 --- 
a/plugins/inputs/opentelemetry/opentelemetry.go +++ b/plugins/inputs/opentelemetry/opentelemetry.go @@ -24,6 +24,7 @@ type OpenTelemetry struct { Log telegraf.Logger `toml:"-"` + listener net.Listener // overridden in tests grpcServer *grpc.Server wg sync.WaitGroup @@ -89,14 +90,16 @@ func (o *OpenTelemetry) Start(accumulator telegraf.Accumulator) error { otlpgrpc.RegisterMetricsServer(o.grpcServer, ms) otlpgrpc.RegisterLogsServer(o.grpcServer, newLogsService(logger, influxWriter)) - listener, err := net.Listen("tcp", o.ServiceAddress) - if err != nil { - return err + if o.listener == nil { + o.listener, err = net.Listen("tcp", o.ServiceAddress) + if err != nil { + return err + } } o.wg.Add(1) go func() { - if err := o.grpcServer.Serve(listener); err != nil { + if err := o.grpcServer.Serve(o.listener); err != nil { accumulator.AddError(fmt.Errorf("failed to stop OpenTelemetry gRPC service: %w", err)) } o.wg.Done() diff --git a/plugins/inputs/opentelemetry/opentelemetry_test.go b/plugins/inputs/opentelemetry/opentelemetry_test.go new file mode 100644 index 0000000000000..2de35bb06af50 --- /dev/null +++ b/plugins/inputs/opentelemetry/opentelemetry_test.go @@ -0,0 +1,83 @@ +package opentelemetry + +import ( + "context" + "net" + "testing" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/global" + controller "go.opentelemetry.io/otel/sdk/metric/controller/basic" + processor "go.opentelemetry.io/otel/sdk/metric/processor/basic" + "go.opentelemetry.io/otel/sdk/metric/selector/simple" + "google.golang.org/grpc" + "google.golang.org/grpc/test/bufconn" +) + +func TestOpenTelemetry(t *testing.T) { + mockListener := bufconn.Listen(1024 * 1024) + plugin := 
inputs.Inputs["opentelemetry"]().(*OpenTelemetry) + plugin.listener = mockListener + accumulator := new(testutil.Accumulator) + + err := plugin.Start(accumulator) + require.NoError(t, err) + t.Cleanup(plugin.Stop) + + metricExporter, err := otlpmetricgrpc.New(context.Background(), + otlpmetricgrpc.WithInsecure(), + otlpmetricgrpc.WithDialOption( + grpc.WithBlock(), + grpc.WithContextDialer(func(_ context.Context, _ string) (net.Conn, error) { + return mockListener.Dial() + })), + ) + require.NoError(t, err) + t.Cleanup(func() { _ = metricExporter.Shutdown(context.Background()) }) + + pusher := controller.New( + processor.New( + simple.NewWithExactDistribution(), + metricExporter, + ), + controller.WithExporter(metricExporter), + ) + + err = pusher.Start(context.Background()) + require.NoError(t, err) + t.Cleanup(func() { _ = pusher.Stop(context.Background()) }) + + global.SetMeterProvider(pusher.MeterProvider()) + + // write metrics + meter := global.Meter("library-name") + counter := metric.Must(meter).NewInt64Counter("measurement-counter") + meter.RecordBatch(context.Background(), nil, counter.Measurement(7)) + + err = pusher.Stop(context.Background()) + require.NoError(t, err) + + // Shutdown + + plugin.Stop() + + err = metricExporter.Shutdown(context.Background()) + require.NoError(t, err) + + // Check + + assert.Empty(t, accumulator.Errors) + + if assert.Len(t, accumulator.Metrics, 1) { + got := accumulator.Metrics[0] + assert.Equal(t, "measurement-counter", got.Measurement) + assert.Equal(t, telegraf.Counter, got.Type) + assert.Equal(t, "library-name", got.Tags["otel.library.name"]) + } +} From 3ec4c128caf32f89c18501299f894abac69420e9 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Thu, 23 Sep 2021 10:15:43 -0700 Subject: [PATCH 087/176] docs: add trig and twemproxy plugin readmes (#9801) --- plugins/inputs/trig/README.md | 28 ++++++++++++++++++++++++++++ plugins/inputs/twemproxy/README.md | 16 ++++++++++++++++ 
2 files changed, 44 insertions(+) create mode 100644 plugins/inputs/trig/README.md create mode 100644 plugins/inputs/twemproxy/README.md diff --git a/plugins/inputs/trig/README.md b/plugins/inputs/trig/README.md new file mode 100644 index 0000000000000..41ff8743e8cf3 --- /dev/null +++ b/plugins/inputs/trig/README.md @@ -0,0 +1,28 @@ +# Trig Input Plugin + +The `trig` plugin is for demonstration purposes and inserts sine and cosine + +### Configuration + +```toml +# Inserts sine and cosine waves for demonstration purposes +[[inputs.trig]] + ## Set the amplitude + amplitude = 10.0 +``` + +### Metrics + +- trig + - fields: + - cosine (float) + - sine (float) + + +### Example Output + +``` +trig,host=MBP15-SWANG.local cosine=10,sine=0 1632338680000000000 +trig,host=MBP15-SWANG.local sine=5.877852522924732,cosine=8.090169943749473 1632338690000000000 +trig,host=MBP15-SWANG.local sine=9.510565162951535,cosine=3.0901699437494745 1632338700000000000 +``` diff --git a/plugins/inputs/twemproxy/README.md b/plugins/inputs/twemproxy/README.md new file mode 100644 index 0000000000000..0c07e0aec4463 --- /dev/null +++ b/plugins/inputs/twemproxy/README.md @@ -0,0 +1,16 @@ +# Twemproxy Input Plugin + +The `twemproxy` plugin gathers statistics from [Twemproxy](https://github.com/twitter/twemproxy) servers. 
+ + +### Configuration + +```toml +# Read Twemproxy stats data +[[inputs.twemproxy]] + ## Twemproxy stats address and port (no scheme) + addr = "localhost:22222" + ## Monitor pool name + pools = ["redis_pool", "mc_pool"] +``` + From 3b94269f30de70079fbf9942cf9266882b359947 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Fri, 24 Sep 2021 09:09:52 -0600 Subject: [PATCH 088/176] fix: update golang-ci package (#9817) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index cbe0e2a2e5dbb..12267c04403bb 100644 --- a/Makefile +++ b/Makefile @@ -141,7 +141,7 @@ vet: .PHONY: lint-install lint-install: - go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.38.0 + go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.42.1 .PHONY: lint lint: From c4d2ad85f0dc4ee386f4b5975c33b9ef15f7cbf6 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Tue, 28 Sep 2021 16:09:20 +0200 Subject: [PATCH 089/176] feat: Allow user to select the source for the metric timestamp. (#9013) --- plugins/inputs/opcua/README.md | 6 ++++ plugins/inputs/opcua/opcua_client.go | 42 ++++++++++++++++++++++------ 2 files changed, 39 insertions(+), 9 deletions(-) diff --git a/plugins/inputs/opcua/README.md b/plugins/inputs/opcua/README.md index d6530c0839b18..f28981f7482ae 100644 --- a/plugins/inputs/opcua/README.md +++ b/plugins/inputs/opcua/README.md @@ -46,6 +46,12 @@ Plugin minimum tested version: 1.16 ## Password. Required for auth_method = "UserName" # password = "" # + ## Option to select the metric timestamp to use. 
Valid options are: + ## "gather" -- uses the time of receiving the data in telegraf + ## "server" -- uses the timestamp provided by the server + ## "source" -- uses the timestamp provided by the source + # timestamp = "gather" + # ## Node ID configuration ## name - field name to use in the output ## namespace - OPC UA namespace of the node (integer value 0 thru 3) diff --git a/plugins/inputs/opcua/opcua_client.go b/plugins/inputs/opcua/opcua_client.go index 8dec41eb343e3..d59adc453ba8b 100644 --- a/plugins/inputs/opcua/opcua_client.go +++ b/plugins/inputs/opcua/opcua_client.go @@ -12,6 +12,7 @@ import ( "github.com/gopcua/opcua/ua" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal/choice" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/selfstat" ) @@ -26,6 +27,7 @@ type OpcUA struct { PrivateKey string `toml:"private_key"` Username string `toml:"username"` Password string `toml:"password"` + Timestamp string `toml:"timestamp"` AuthMethod string `toml:"auth_method"` ConnectTimeout config.Duration `toml:"connect_timeout"` RequestTimeout config.Duration `toml:"request_timeout"` @@ -77,12 +79,12 @@ type GroupSettings struct { // OPCData type type OPCData struct { - TagName string - Value interface{} - Quality ua.StatusCode - TimeStamp string - Time string - DataType ua.TypeID + TagName string + Value interface{} + Quality ua.StatusCode + ServerTime time.Time + SourceTime time.Time + DataType ua.TypeID } // ConnectionState used for constants @@ -136,6 +138,12 @@ const sampleConfig = ` ## Password. Required for auth_method = "UserName" # password = "" # + ## Option to select the metric timestamp to use. 
Valid options are: + ## "gather" -- uses the time of receiving the data in telegraf + ## "server" -- uses the timestamp provided by the server + ## "source" -- uses the timestamp provided by the source + # timestamp = "gather" + # ## Node ID configuration ## name - field name to use in the output ## namespace - OPC UA namespace of the node (integer value 0 thru 3) @@ -188,7 +196,12 @@ func (o *OpcUA) SampleConfig() string { func (o *OpcUA) Init() error { o.state = Disconnected - err := o.validateEndpoint() + err := choice.Check(o.Timestamp, []string{"", "gather", "server", "source"}) + if err != nil { + return err + } + + err = o.validateEndpoint() if err != nil { return err } @@ -485,8 +498,9 @@ func (o *OpcUA) getData() error { o.nodeData[i].Value = d.Value.Value() o.nodeData[i].DataType = d.Value.Type() } - o.nodeData[i].TimeStamp = d.ServerTimestamp.String() - o.nodeData[i].Time = d.SourceTimestamp.String() + o.nodeData[i].Quality = d.Status + o.nodeData[i].ServerTime = d.ServerTimestamp + o.nodeData[i].SourceTime = d.SourceTimestamp } return nil } @@ -551,6 +565,15 @@ func (o *OpcUA) Gather(acc telegraf.Accumulator) error { fields[o.nodeData[i].TagName] = o.nodeData[i].Value fields["Quality"] = strings.TrimSpace(fmt.Sprint(o.nodeData[i].Quality)) acc.AddFields(n.metricName, fields, tags) + + switch o.Timestamp { + case "server": + acc.AddFields(n.metricName, fields, tags, o.nodeData[i].ServerTime) + case "source": + acc.AddFields(n.metricName, fields, tags, o.nodeData[i].SourceTime) + default: + acc.AddFields(n.metricName, fields, tags) + } } } return nil @@ -564,6 +587,7 @@ func init() { Endpoint: "opc.tcp://localhost:4840", SecurityPolicy: "auto", SecurityMode: "auto", + Timestamp: "gather", RequestTimeout: config.Duration(5 * time.Second), ConnectTimeout: config.Duration(10 * time.Second), Certificate: "/etc/telegraf/cert.pem", From 6a3b27126a26dd43d2a287b4c9d8831dca6bf687 Mon Sep 17 00:00:00 2001 From: Eng Zer Jun Date: Wed, 29 Sep 2021 05:16:32 +0800 
Subject: [PATCH 090/176] refactor: move from io/ioutil to io and os package (#9811) --- config/config.go | 6 ++-- internal/content_coding_test.go | 4 +-- internal/internal_test.go | 7 ++-- internal/process/process.go | 3 +- internal/rotate/file_writer_test.go | 35 +++++++++---------- logger/logger_test.go | 27 +++++++------- plugins/common/cookie/cookie.go | 5 ++- plugins/common/cookie/cookie_test.go | 4 +-- plugins/common/encoding/decoder_test.go | 4 +-- plugins/common/logrus/hook.go | 4 +-- plugins/common/shim/config.go | 3 +- plugins/common/shim/input_test.go | 5 +-- plugins/common/shim/processor_test.go | 5 +-- plugins/common/tls/config.go | 4 +-- plugins/inputs/activemq/activemq.go | 4 +-- plugins/inputs/aliyuncms/aliyuncms_test.go | 8 ++--- .../inputs/amd_rocm_smi/amd_rocm_smi_test.go | 4 +-- plugins/inputs/bcache/bcache.go | 5 ++- plugins/inputs/bcache/bcache_test.go | 19 +++++----- plugins/inputs/beat/beat_test.go | 6 ++-- plugins/inputs/bond/bond.go | 3 +- plugins/inputs/burrow/burrow_test.go | 3 +- plugins/inputs/cassandra/cassandra.go | 4 +-- plugins/inputs/cassandra/cassandra_test.go | 4 +-- plugins/inputs/ceph/ceph.go | 4 +-- plugins/inputs/ceph/ceph_test.go | 5 ++- plugins/inputs/cgroup/cgroup_linux.go | 3 +- plugins/inputs/clickhouse/clickhouse.go | 5 ++- .../inputs/cloud_pubsub_push/pubsub_push.go | 4 +-- plugins/inputs/conntrack/conntrack.go | 6 ++-- plugins/inputs/conntrack/conntrack_test.go | 17 +++++---- plugins/inputs/dcos/creds.go | 4 +-- plugins/inputs/dcos/dcos.go | 4 +-- .../directory_monitor/directory_monitor.go | 5 ++- .../directory_monitor_test.go | 11 +++--- plugins/inputs/diskio/diskio_linux_test.go | 3 +- plugins/inputs/docker/docker_test.go | 6 ++-- plugins/inputs/docker/docker_testdata.go | 6 ++-- plugins/inputs/ecs/client.go | 5 ++- plugins/inputs/ecs/client_test.go | 14 ++++---- plugins/inputs/elasticsearch/elasticsearch.go | 4 +-- .../elasticsearch/elasticsearch_test.go | 4 +-- plugins/inputs/execd/shim/goshim.go | 3 +- 
plugins/inputs/file/file.go | 4 +-- plugins/inputs/fluentd/fluentd.go | 4 +-- plugins/inputs/graylog/graylog.go | 4 +-- plugins/inputs/graylog/graylog_test.go | 4 +-- plugins/inputs/http/http.go | 8 ++--- plugins/inputs/http/http_test.go | 10 +++--- .../http_listener_v2/http_listener_v2.go | 8 ++--- .../http_listener_v2/http_listener_v2_test.go | 4 +-- plugins/inputs/http_response/http_response.go | 6 ++-- .../http_response/http_response_test.go | 4 +-- plugins/inputs/httpjson/httpjson.go | 4 +-- plugins/inputs/httpjson/httpjson_test.go | 6 ++-- .../influxdb_listener_test.go | 4 +-- .../influxdb_v2_listener.go | 4 +-- .../influxdb_v2_listener_test.go | 7 ++-- plugins/inputs/intel_powerstat/file.go | 5 ++- plugins/inputs/jolokia/jolokia.go | 4 +-- plugins/inputs/jolokia/jolokia_test.go | 4 +-- plugins/inputs/jolokia2/client.go | 4 +-- plugins/inputs/jolokia2/client_test.go | 6 ++-- plugins/inputs/kernel/kernel.go | 5 ++- plugins/inputs/kernel/kernel_test.go | 3 +- plugins/inputs/kernel_vmstat/kernel_vmstat.go | 3 +- .../kernel_vmstat/kernel_vmstat_test.go | 3 +- plugins/inputs/kibana/kibana.go | 3 +- plugins/inputs/kibana/kibana_test.go | 4 +-- .../kinesis_consumer/kinesis_consumer.go | 6 ++-- plugins/inputs/kube_inventory/kube_state.go | 4 +-- plugins/inputs/kubernetes/kubernetes.go | 4 +-- plugins/inputs/leofs/leofs_test.go | 3 +- .../inputs/linux_sysctl_fs/linux_sysctl_fs.go | 5 ++- .../linux_sysctl_fs/linux_sysctl_fs_test.go | 15 ++++---- plugins/inputs/logparser/logparser_test.go | 7 ++-- plugins/inputs/logstash/logstash.go | 3 +- plugins/inputs/lustre2/lustre2.go | 4 +-- plugins/inputs/lustre2/lustre2_test.go | 11 +++--- plugins/inputs/mailchimp/chimp_api.go | 5 ++- plugins/inputs/mdstat/mdstat.go | 3 +- plugins/inputs/mdstat/mdstat_test.go | 3 +- plugins/inputs/mesos/mesos.go | 4 +-- plugins/inputs/multifile/multifile.go | 4 +-- plugins/inputs/nats/nats.go | 4 +-- plugins/inputs/neptune_apex/neptune_apex.go | 4 +-- .../nginx_plus_api/nginx_plus_api_metrics.go 
| 4 +-- .../nginx_upstream_check.go | 3 +- plugins/inputs/nsq/nsq.go | 4 +-- plugins/inputs/nstat/nstat.go | 7 ++-- plugins/inputs/nvidia_smi/nvidia_smi_test.go | 4 +-- plugins/inputs/opcua/opcua_util.go | 3 +- plugins/inputs/passenger/passenger_test.go | 3 +- plugins/inputs/phpfpm/child.go | 5 ++- plugins/inputs/phpfpm/fcgi_test.go | 5 ++- plugins/inputs/postfix/postfix_test.go | 15 ++++---- .../postgresql_extensible.go | 4 +-- .../inputs/processes/processes_notwindows.go | 3 +- plugins/inputs/procstat/native_finder.go | 4 +-- plugins/inputs/procstat/pgrep.go | 4 +-- plugins/inputs/procstat/procstat.go | 3 +- plugins/inputs/procstat/procstat_test.go | 5 ++- plugins/inputs/prometheus/kubernetes.go | 4 +-- plugins/inputs/prometheus/prometheus.go | 6 ++-- plugins/inputs/proxmox/proxmox.go | 4 +-- plugins/inputs/puppetagent/puppetagent.go | 6 ++-- plugins/inputs/rabbitmq/rabbitmq.go | 4 +-- plugins/inputs/rabbitmq/rabbitmq_test.go | 6 ++-- plugins/inputs/ravendb/ravendb_test.go | 6 ++-- plugins/inputs/redfish/redfish.go | 4 +-- plugins/inputs/salesforce/salesforce.go | 5 ++- plugins/inputs/snmp_legacy/snmp_legacy.go | 4 +-- .../socket_listener/socket_listener_test.go | 7 ++-- plugins/inputs/sql/sql.go | 4 +-- plugins/inputs/suricata/suricata_test.go | 27 +++++++------- plugins/inputs/synproxy/synproxy_test.go | 3 +- plugins/inputs/syslog/nontransparent_test.go | 9 +++-- plugins/inputs/syslog/octetcounting_test.go | 9 +++-- plugins/inputs/syslog/rfc5426_test.go | 5 ++- plugins/inputs/syslog/syslog_test.go | 3 +- plugins/inputs/tail/tail_test.go | 13 ++++--- plugins/inputs/twemproxy/twemproxy.go | 4 +-- .../inputs/udp_listener/udp_listener_test.go | 8 ++--- .../webhooks/filestack/filestack_webhooks.go | 4 +-- .../inputs/webhooks/github/github_webhooks.go | 4 +-- .../webhooks/mandrill/mandrill_webhooks.go | 4 +-- .../webhooks/rollbar/rollbar_webhooks.go | 4 +-- plugins/inputs/wireless/wireless_linux.go | 3 +- plugins/inputs/x509_cert/x509_cert.go | 7 ++-- 
plugins/inputs/x509_cert/x509_cert_test.go | 12 +++---- plugins/inputs/zfs/zfs_linux_test.go | 21 ++++++----- .../cmd/thrift_serialize/thrift_serialize.go | 8 ++--- .../inputs/zipkin/codec/thrift/thrift_test.go | 4 +-- plugins/inputs/zipkin/handler.go | 4 +-- plugins/inputs/zipkin/handler_test.go | 7 ++-- plugins/inputs/zipkin/zipkin_test.go | 4 +-- .../outputs/azure_monitor/azure_monitor.go | 6 ++-- plugins/outputs/dynatrace/dynatrace.go | 4 +-- plugins/outputs/dynatrace/dynatrace_test.go | 18 +++++----- plugins/outputs/file/file_test.go | 7 ++-- plugins/outputs/health/health_test.go | 4 +-- plugins/outputs/http/http.go | 3 +- plugins/outputs/http/http_test.go | 4 +-- plugins/outputs/influxdb/http.go | 7 ++-- plugins/outputs/influxdb/http_test.go | 16 ++++----- plugins/outputs/influxdb_v2/http.go | 3 +- plugins/outputs/influxdb_v2/http_test.go | 4 +-- plugins/outputs/librato/librato.go | 4 +-- plugins/outputs/loki/loki_test.go | 9 ++--- plugins/outputs/opentsdb/opentsdb_http.go | 3 +- .../prometheus_client_v1_test.go | 8 ++--- .../prometheus_client_v2_test.go | 6 ++-- plugins/outputs/sensu/sensu.go | 3 +- plugins/outputs/sensu/sensu_test.go | 4 +-- .../socket_writer/socket_writer_test.go | 5 ++- plugins/outputs/sql/sql_test.go | 13 ++++--- plugins/outputs/sql/sqlite_test.go | 3 +- plugins/outputs/sumologic/sumologic_test.go | 3 +- plugins/outputs/warp10/warp10.go | 4 +-- .../yandex_cloud_monitoring.go | 6 ++-- plugins/parsers/json_v2/parser_test.go | 3 +- plugins/parsers/prometheus/parser_test.go | 4 +-- plugins/parsers/xpath/parser_test.go | 6 ++-- plugins/processors/starlark/starlark_test.go | 3 +- testutil/tls.go | 4 +-- 165 files changed, 456 insertions(+), 517 deletions(-) diff --git a/config/config.go b/config/config.go index e64d893bc05ea..4880da4832e5a 100644 --- a/config/config.go +++ b/config/config.go @@ -3,7 +3,7 @@ package config import ( "bytes" "fmt" - "io/ioutil" + "io" "log" "net/http" "net/url" @@ -933,7 +933,7 @@ func loadConfig(config string) 
([]byte, error) { } // If it isn't a https scheme, try it as a file - return ioutil.ReadFile(config) + return os.ReadFile(config) } func fetchConfig(u *url.URL) ([]byte, error) { @@ -964,7 +964,7 @@ func fetchConfig(u *url.URL) ([]byte, error) { return nil, fmt.Errorf("Retry %d of %d failed to retrieve remote config: %s", i, retries, resp.Status) } defer resp.Body.Close() - return ioutil.ReadAll(resp.Body) + return io.ReadAll(resp.Body) } return nil, nil diff --git a/internal/content_coding_test.go b/internal/content_coding_test.go index 85496df59c5b6..06235a63879a9 100644 --- a/internal/content_coding_test.go +++ b/internal/content_coding_test.go @@ -2,7 +2,7 @@ package internal import ( "bytes" - "io/ioutil" + "io" "testing" "github.com/stretchr/testify/require" @@ -68,7 +68,7 @@ func TestStreamIdentityDecode(t *testing.T) { dec, err := NewStreamContentDecoder("identity", &r) require.NoError(t, err) - data, err := ioutil.ReadAll(dec) + data, err := io.ReadAll(dec) require.NoError(t, err) require.Equal(t, []byte("howdy"), data) diff --git a/internal/internal_test.go b/internal/internal_test.go index 7cb56d5324f06..8dae73f562702 100644 --- a/internal/internal_test.go +++ b/internal/internal_test.go @@ -5,7 +5,6 @@ import ( "compress/gzip" "crypto/rand" "io" - "io/ioutil" "log" "os/exec" "regexp" @@ -182,7 +181,7 @@ func TestCompressWithGzip(t *testing.T) { assert.NoError(t, err) defer gzipReader.Close() - output, err := ioutil.ReadAll(gzipReader) + output, err := io.ReadAll(gzipReader) assert.NoError(t, err) assert.Equal(t, testData, string(output)) @@ -203,7 +202,7 @@ func TestCompressWithGzipEarlyClose(t *testing.T) { rc, err := CompressWithGzip(mr) assert.NoError(t, err) - n, err := io.CopyN(ioutil.Discard, rc, 10000) + n, err := io.CopyN(io.Discard, rc, 10000) assert.NoError(t, err) assert.Equal(t, int64(10000), n) @@ -211,7 +210,7 @@ func TestCompressWithGzipEarlyClose(t *testing.T) { err = rc.Close() assert.NoError(t, err) - n, err = io.CopyN(ioutil.Discard, 
rc, 10000) + n, err = io.CopyN(io.Discard, rc, 10000) assert.Error(t, io.EOF, err) assert.Equal(t, int64(0), n) diff --git a/internal/process/process.go b/internal/process/process.go index 6da98d211a43b..3bfc3bb7e44e6 100644 --- a/internal/process/process.go +++ b/internal/process/process.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os/exec" "sync" "sync/atomic" @@ -187,5 +186,5 @@ func isQuitting(ctx context.Context) bool { } func defaultReadPipe(r io.Reader) { - io.Copy(ioutil.Discard, r) + _, _ = io.Copy(io.Discard, r) } diff --git a/internal/rotate/file_writer_test.go b/internal/rotate/file_writer_test.go index ca29b9a2f45d6..2d249d74548e1 100644 --- a/internal/rotate/file_writer_test.go +++ b/internal/rotate/file_writer_test.go @@ -1,7 +1,6 @@ package rotate import ( - "io/ioutil" "os" "path/filepath" "testing" @@ -12,7 +11,7 @@ import ( ) func TestFileWriter_NoRotation(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationNo") + tempDir, err := os.MkdirTemp("", "RotationNo") require.NoError(t, err) writer, err := NewFileWriter(filepath.Join(tempDir, "test"), 0, 0, 0) require.NoError(t, err) @@ -22,12 +21,12 @@ func TestFileWriter_NoRotation(t *testing.T) { require.NoError(t, err) _, err = writer.Write([]byte("Hello World 2")) require.NoError(t, err) - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 1, len(files)) } func TestFileWriter_TimeRotation(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationTime") + tempDir, err := os.MkdirTemp("", "RotationTime") require.NoError(t, err) interval, _ := time.ParseDuration("1s") writer, err := NewFileWriter(filepath.Join(tempDir, "test"), interval, 0, -1) @@ -39,28 +38,28 @@ func TestFileWriter_TimeRotation(t *testing.T) { time.Sleep(1 * time.Second) _, err = writer.Write([]byte("Hello World 2")) require.NoError(t, err) - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 2, len(files)) } func 
TestFileWriter_ReopenTimeRotation(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationTime") + tempDir, err := os.MkdirTemp("", "RotationTime") require.NoError(t, err) interval, _ := time.ParseDuration("1s") filePath := filepath.Join(tempDir, "test.log") - err = ioutil.WriteFile(filePath, []byte("Hello World"), 0644) + err = os.WriteFile(filePath, []byte("Hello World"), 0644) time.Sleep(1 * time.Second) assert.NoError(t, err) writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), interval, 0, -1) require.NoError(t, err) defer func() { writer.Close(); os.RemoveAll(tempDir) }() - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 2, len(files)) } func TestFileWriter_SizeRotation(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationSize") + tempDir, err := os.MkdirTemp("", "RotationSize") require.NoError(t, err) maxSize := int64(9) writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, -1) @@ -71,16 +70,16 @@ func TestFileWriter_SizeRotation(t *testing.T) { require.NoError(t, err) _, err = writer.Write([]byte("World 2")) require.NoError(t, err) - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 2, len(files)) } func TestFileWriter_ReopenSizeRotation(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationSize") + tempDir, err := os.MkdirTemp("", "RotationSize") require.NoError(t, err) maxSize := int64(12) filePath := filepath.Join(tempDir, "test.log") - err = ioutil.WriteFile(filePath, []byte("Hello World"), 0644) + err = os.WriteFile(filePath, []byte("Hello World"), 0644) assert.NoError(t, err) writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, -1) require.NoError(t, err) @@ -88,12 +87,12 @@ func TestFileWriter_ReopenSizeRotation(t *testing.T) { _, err = writer.Write([]byte("Hello World Again")) require.NoError(t, err) - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 2, 
len(files)) } func TestFileWriter_DeleteArchives(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationDeleteArchives") + tempDir, err := os.MkdirTemp("", "RotationDeleteArchives") require.NoError(t, err) maxSize := int64(5) writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, 2) @@ -112,14 +111,14 @@ func TestFileWriter_DeleteArchives(t *testing.T) { _, err = writer.Write([]byte("Third file")) require.NoError(t, err) - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 3, len(files)) for _, tempFile := range files { var bytes []byte var err error path := filepath.Join(tempDir, tempFile.Name()) - if bytes, err = ioutil.ReadFile(path); err != nil { + if bytes, err = os.ReadFile(path); err != nil { t.Error(err.Error()) return } @@ -133,7 +132,7 @@ func TestFileWriter_DeleteArchives(t *testing.T) { } func TestFileWriter_CloseRotates(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationClose") + tempDir, err := os.MkdirTemp("", "RotationClose") require.NoError(t, err) defer os.RemoveAll(tempDir) maxSize := int64(9) @@ -142,7 +141,7 @@ func TestFileWriter_CloseRotates(t *testing.T) { writer.Close() - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 1, len(files)) assert.Regexp(t, "^test\\.[^\\.]+\\.log$", files[0].Name()) } diff --git a/logger/logger_test.go b/logger/logger_test.go index d2c699da52644..47af1d4591bff 100644 --- a/logger/logger_test.go +++ b/logger/logger_test.go @@ -3,7 +3,6 @@ package logger import ( "bytes" "io" - "io/ioutil" "log" "os" "path/filepath" @@ -15,7 +14,7 @@ import ( ) func TestWriteLogToFile(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") assert.NoError(t, err) defer func() { os.Remove(tmpfile.Name()) }() @@ -24,13 +23,13 @@ func TestWriteLogToFile(t *testing.T) { log.Printf("I! TEST") log.Printf("D! 
TEST") // <- should be ignored - f, err := ioutil.ReadFile(tmpfile.Name()) + f, err := os.ReadFile(tmpfile.Name()) assert.NoError(t, err) assert.Equal(t, f[19:], []byte("Z I! TEST\n")) } func TestDebugWriteLogToFile(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") assert.NoError(t, err) defer func() { os.Remove(tmpfile.Name()) }() config := createBasicLogConfig(tmpfile.Name()) @@ -38,13 +37,13 @@ func TestDebugWriteLogToFile(t *testing.T) { SetupLogging(config) log.Printf("D! TEST") - f, err := ioutil.ReadFile(tmpfile.Name()) + f, err := os.ReadFile(tmpfile.Name()) assert.NoError(t, err) assert.Equal(t, f[19:], []byte("Z D! TEST\n")) } func TestErrorWriteLogToFile(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") assert.NoError(t, err) defer func() { os.Remove(tmpfile.Name()) }() config := createBasicLogConfig(tmpfile.Name()) @@ -53,13 +52,13 @@ func TestErrorWriteLogToFile(t *testing.T) { log.Printf("E! TEST") log.Printf("I! TEST") // <- should be ignored - f, err := ioutil.ReadFile(tmpfile.Name()) + f, err := os.ReadFile(tmpfile.Name()) assert.NoError(t, err) assert.Equal(t, f[19:], []byte("Z E! TEST\n")) } func TestAddDefaultLogLevel(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") assert.NoError(t, err) defer func() { os.Remove(tmpfile.Name()) }() config := createBasicLogConfig(tmpfile.Name()) @@ -67,13 +66,13 @@ func TestAddDefaultLogLevel(t *testing.T) { SetupLogging(config) log.Printf("TEST") - f, err := ioutil.ReadFile(tmpfile.Name()) + f, err := os.ReadFile(tmpfile.Name()) assert.NoError(t, err) assert.Equal(t, f[19:], []byte("Z I! 
TEST\n")) } func TestWriteToTruncatedFile(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") assert.NoError(t, err) defer func() { os.Remove(tmpfile.Name()) }() config := createBasicLogConfig(tmpfile.Name()) @@ -81,7 +80,7 @@ func TestWriteToTruncatedFile(t *testing.T) { SetupLogging(config) log.Printf("TEST") - f, err := ioutil.ReadFile(tmpfile.Name()) + f, err := os.ReadFile(tmpfile.Name()) assert.NoError(t, err) assert.Equal(t, f[19:], []byte("Z I! TEST\n")) @@ -91,13 +90,13 @@ func TestWriteToTruncatedFile(t *testing.T) { log.Printf("SHOULD BE FIRST") - f, err = ioutil.ReadFile(tmpfile.Name()) + f, err = os.ReadFile(tmpfile.Name()) assert.NoError(t, err) assert.Equal(t, f[19:], []byte("Z I! SHOULD BE FIRST\n")) } func TestWriteToFileInRotation(t *testing.T) { - tempDir, err := ioutil.TempDir("", "LogRotation") + tempDir, err := os.MkdirTemp("", "LogRotation") require.NoError(t, err) cfg := createBasicLogConfig(filepath.Join(tempDir, "test.log")) cfg.LogTarget = LogTargetFile @@ -110,7 +109,7 @@ func TestWriteToFileInRotation(t *testing.T) { log.Printf("I! TEST 1") // Writes 31 bytes, will rotate log.Printf("I! 
TEST") // Writes 29 byes, no rotation expected - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 2, len(files)) } diff --git a/plugins/common/cookie/cookie.go b/plugins/common/cookie/cookie.go index e452a50a4b0a9..03fd97f95077f 100644 --- a/plugins/common/cookie/cookie.go +++ b/plugins/common/cookie/cookie.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "net/http" "net/http/cookiejar" "strings" @@ -78,7 +77,7 @@ func (c *CookieAuthConfig) authRenewal(ctx context.Context, ticker *clockutil.Ti func (c *CookieAuthConfig) auth() error { var body io.ReadCloser if c.Body != "" { - body = ioutil.NopCloser(strings.NewReader(c.Body)) + body = io.NopCloser(strings.NewReader(c.Body)) defer body.Close() } @@ -97,7 +96,7 @@ func (c *CookieAuthConfig) auth() error { } defer resp.Body.Close() - if _, err = io.Copy(ioutil.Discard, resp.Body); err != nil { + if _, err = io.Copy(io.Discard, resp.Body); err != nil { return err } diff --git a/plugins/common/cookie/cookie_test.go b/plugins/common/cookie/cookie_test.go index 99269c27cd339..b32ceb0059e8b 100644 --- a/plugins/common/cookie/cookie_test.go +++ b/plugins/common/cookie/cookie_test.go @@ -3,7 +3,7 @@ package cookie import ( "context" "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "sync/atomic" @@ -50,7 +50,7 @@ func newFakeServer(t *testing.T) fakeServer { case authEndpointNoCreds: authed() case authEndpointWithBody: - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) if !cmp.Equal([]byte(reqBody), body) { w.WriteHeader(http.StatusUnauthorized) diff --git a/plugins/common/encoding/decoder_test.go b/plugins/common/encoding/decoder_test.go index 87115318ad0ed..b8e19af9cea43 100644 --- a/plugins/common/encoding/decoder_test.go +++ b/plugins/common/encoding/decoder_test.go @@ -2,7 +2,7 @@ package encoding import ( "bytes" - "io/ioutil" + "io" "testing" "github.com/stretchr/testify/require" @@ -66,7 +66,7 @@ func 
TestDecoder(t *testing.T) { require.NoError(t, err) buf := bytes.NewBuffer(tt.input) r := decoder.Reader(buf) - actual, err := ioutil.ReadAll(r) + actual, err := io.ReadAll(r) if tt.expectedErr { require.Error(t, err) return diff --git a/plugins/common/logrus/hook.go b/plugins/common/logrus/hook.go index a7f99023be1ba..7451639a75423 100644 --- a/plugins/common/logrus/hook.go +++ b/plugins/common/logrus/hook.go @@ -1,7 +1,7 @@ package logrus import ( - "io/ioutil" + "io" "log" "strings" "sync" @@ -19,7 +19,7 @@ type LogHook struct { // that directly log to the logrus system without providing an override method. func InstallHook() { once.Do(func() { - logrus.SetOutput(ioutil.Discard) + logrus.SetOutput(io.Discard) logrus.AddHook(&LogHook{}) }) } diff --git a/plugins/common/shim/config.go b/plugins/common/shim/config.go index a0bb3ce0de696..089c2b7ee7525 100644 --- a/plugins/common/shim/config.go +++ b/plugins/common/shim/config.go @@ -3,7 +3,6 @@ package shim import ( "errors" "fmt" - "io/ioutil" "log" "os" @@ -53,7 +52,7 @@ func LoadConfig(filePath *string) (loaded loadedConfig, err error) { var data string conf := config{} if filePath != nil && *filePath != "" { - b, err := ioutil.ReadFile(*filePath) + b, err := os.ReadFile(*filePath) if err != nil { return loadedConfig{}, err } diff --git a/plugins/common/shim/input_test.go b/plugins/common/shim/input_test.go index 7cbfe6413975f..9a0423261ac14 100644 --- a/plugins/common/shim/input_test.go +++ b/plugins/common/shim/input_test.go @@ -3,7 +3,6 @@ package shim import ( "bufio" "io" - "io/ioutil" "strings" "testing" "time" @@ -45,7 +44,9 @@ func TestInputShimStdinSignalingWorks(t *testing.T) { require.Equal(t, "measurement,tag=tag field=1i 1234000005678\n", out) stdinWriter.Close() - go ioutil.ReadAll(r) + go func() { + _, _ = io.ReadAll(r) + }() // check that it exits cleanly <-exited } diff --git a/plugins/common/shim/processor_test.go b/plugins/common/shim/processor_test.go index ea2e61a459469..bc00fb70d1bba 100644 
--- a/plugins/common/shim/processor_test.go +++ b/plugins/common/shim/processor_test.go @@ -3,7 +3,6 @@ package shim import ( "bufio" "io" - "io/ioutil" "math/rand" "sync" "testing" @@ -84,7 +83,9 @@ func testSendAndRecieve(t *testing.T, fieldKey string, fieldValue string) { val2, ok := mOut.Fields()[fieldKey] require.True(t, ok) require.Equal(t, fieldValue, val2) - go ioutil.ReadAll(r) + go func() { + _, _ = io.ReadAll(r) + }() wg.Wait() } diff --git a/plugins/common/tls/config.go b/plugins/common/tls/config.go index 9a752fbce5714..586ec8fd4a417 100644 --- a/plugins/common/tls/config.go +++ b/plugins/common/tls/config.go @@ -4,7 +4,7 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" + "os" "strings" ) @@ -147,7 +147,7 @@ func (c *ServerConfig) TLSConfig() (*tls.Config, error) { func makeCertPool(certFiles []string) (*x509.CertPool, error) { pool := x509.NewCertPool() for _, certFile := range certFiles { - pem, err := ioutil.ReadFile(certFile) + pem, err := os.ReadFile(certFile) if err != nil { return nil, fmt.Errorf( "could not read certificate %q: %v", certFile, err) diff --git a/plugins/inputs/activemq/activemq.go b/plugins/inputs/activemq/activemq.go index 0674b7ae0fe52..f5cf7927342e5 100644 --- a/plugins/inputs/activemq/activemq.go +++ b/plugins/inputs/activemq/activemq.go @@ -3,7 +3,7 @@ package activemq import ( "encoding/xml" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "path" @@ -184,7 +184,7 @@ func (a *ActiveMQ) GetMetrics(u string) ([]byte, error) { return nil, fmt.Errorf("GET %s returned status %q", u, resp.Status) } - return ioutil.ReadAll(resp.Body) + return io.ReadAll(resp.Body) } func (a *ActiveMQ) GatherQueuesMetrics(acc telegraf.Accumulator, queues Queues) { diff --git a/plugins/inputs/aliyuncms/aliyuncms_test.go b/plugins/inputs/aliyuncms/aliyuncms_test.go index 22e0acbc52ebe..7e346a6ae9b8e 100644 --- a/plugins/inputs/aliyuncms/aliyuncms_test.go +++ b/plugins/inputs/aliyuncms/aliyuncms_test.go @@ -2,7 +2,7 @@ package aliyuncms import 
( "bytes" - "io/ioutil" + "io" "net/http" "testing" "time" @@ -132,7 +132,7 @@ func TestPluginInitialize(t *testing.T) { httpResp := &http.Response{ StatusCode: 200, - Body: ioutil.NopCloser(bytes.NewBufferString( + Body: io.NopCloser(bytes.NewBufferString( `{ "LoadBalancers": { @@ -359,7 +359,7 @@ func TestGetDiscoveryDataAcrossRegions(t *testing.T) { region: "cn-hongkong", httpResp: &http.Response{ StatusCode: 200, - Body: ioutil.NopCloser(bytes.NewBufferString(`{}`)), + Body: io.NopCloser(bytes.NewBufferString(`{}`)), }, totalCount: 0, pageSize: 0, @@ -372,7 +372,7 @@ func TestGetDiscoveryDataAcrossRegions(t *testing.T) { region: "cn-hongkong", httpResp: &http.Response{ StatusCode: 200, - Body: ioutil.NopCloser(bytes.NewBufferString( + Body: io.NopCloser(bytes.NewBufferString( `{ "LoadBalancers": { diff --git a/plugins/inputs/amd_rocm_smi/amd_rocm_smi_test.go b/plugins/inputs/amd_rocm_smi/amd_rocm_smi_test.go index 7893760bdf952..e38e0ff89eae0 100644 --- a/plugins/inputs/amd_rocm_smi/amd_rocm_smi_test.go +++ b/plugins/inputs/amd_rocm_smi/amd_rocm_smi_test.go @@ -1,7 +1,7 @@ package amd_rocm_smi import ( - "io/ioutil" + "os" "path/filepath" "testing" "time" @@ -78,7 +78,7 @@ func TestGatherValidJSON(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var acc testutil.Accumulator - octets, err := ioutil.ReadFile(filepath.Join("testdata", tt.filename)) + octets, err := os.ReadFile(filepath.Join("testdata", tt.filename)) require.NoError(t, err) err = gatherROCmSMI(octets, &acc) diff --git a/plugins/inputs/bcache/bcache.go b/plugins/inputs/bcache/bcache.go index 3195cf4dabcbb..84eb3262fdf28 100644 --- a/plugins/inputs/bcache/bcache.go +++ b/plugins/inputs/bcache/bcache.go @@ -8,7 +8,6 @@ package bcache import ( "errors" "fmt" - "io/ioutil" "os" "path/filepath" "strconv" @@ -85,7 +84,7 @@ func (b *Bcache) gatherBcache(bdev string, acc telegraf.Accumulator) error { if len(metrics) == 0 { return errors.New("can't read any stats file") } - 
file, err := ioutil.ReadFile(bdev + "/dirty_data") + file, err := os.ReadFile(bdev + "/dirty_data") if err != nil { return err } @@ -97,7 +96,7 @@ func (b *Bcache) gatherBcache(bdev string, acc telegraf.Accumulator) error { for _, path := range metrics { key := filepath.Base(path) - file, err := ioutil.ReadFile(path) + file, err := os.ReadFile(path) rawValue := strings.TrimSpace(string(file)) if err != nil { return err diff --git a/plugins/inputs/bcache/bcache_test.go b/plugins/inputs/bcache/bcache_test.go index 857538a8d6f72..4c62e0f014f14 100644 --- a/plugins/inputs/bcache/bcache_test.go +++ b/plugins/inputs/bcache/bcache_test.go @@ -4,7 +4,6 @@ package bcache import ( - "io/ioutil" "os" "testing" @@ -50,39 +49,39 @@ func TestBcacheGeneratesMetrics(t *testing.T) { err = os.MkdirAll(testBcacheUUIDPath+"/bdev0/stats_total", 0755) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/dirty_data", + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/dirty_data", []byte(dirtyData), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/bypassed", + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/bypassed", []byte(bypassed), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_bypass_hits", + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_bypass_hits", []byte(cacheBypassHits), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_bypass_misses", + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_bypass_misses", []byte(cacheBypassMisses), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_hit_ratio", + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_hit_ratio", []byte(cacheHitRatio), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_hits", + err = 
os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_hits", []byte(cacheHits), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_miss_collisions", + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_miss_collisions", []byte(cacheMissCollisions), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_misses", + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_misses", []byte(cacheMisses), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_readaheads", + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_readaheads", []byte(cacheReadaheads), 0644) require.NoError(t, err) diff --git a/plugins/inputs/beat/beat_test.go b/plugins/inputs/beat/beat_test.go index 8f2c5c9c2fbee..433e8fcd61337 100644 --- a/plugins/inputs/beat/beat_test.go +++ b/plugins/inputs/beat/beat_test.go @@ -2,11 +2,11 @@ package beat import ( "fmt" - "io/ioutil" "net" "net/http" "net/http/httptest" "net/url" + "os" "testing" "github.com/influxdata/telegraf/testutil" @@ -31,7 +31,7 @@ func Test_BeatStats(t *testing.T) { require.FailNow(t, "cannot handle request") } - data, err := ioutil.ReadFile(jsonFilePath) + data, err := os.ReadFile(jsonFilePath) require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) _, err = w.Write(data) require.NoError(t, err, "could not write data") @@ -175,7 +175,7 @@ func Test_BeatRequest(t *testing.T) { require.FailNow(t, "cannot handle request") } - data, err := ioutil.ReadFile(jsonFilePath) + data, err := os.ReadFile(jsonFilePath) require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) require.Equal(t, request.Host, "beat.test.local") require.Equal(t, request.Method, "POST") diff --git a/plugins/inputs/bond/bond.go b/plugins/inputs/bond/bond.go index dc9b083ec5af9..4f30a20e3f677 100644 --- a/plugins/inputs/bond/bond.go +++ 
b/plugins/inputs/bond/bond.go @@ -3,7 +3,6 @@ package bond import ( "bufio" "fmt" - "io/ioutil" "os" "path/filepath" "strconv" @@ -53,7 +52,7 @@ func (bond *Bond) Gather(acc telegraf.Accumulator) error { } for _, bondName := range bondNames { bondAbsPath := bond.HostProc + "/net/bonding/" + bondName - file, err := ioutil.ReadFile(bondAbsPath) + file, err := os.ReadFile(bondAbsPath) if err != nil { acc.AddError(fmt.Errorf("error inspecting '%s' interface: %v", bondAbsPath, err)) continue diff --git a/plugins/inputs/burrow/burrow_test.go b/plugins/inputs/burrow/burrow_test.go index d9df7be31d27e..db58df6fc94e8 100644 --- a/plugins/inputs/burrow/burrow_test.go +++ b/plugins/inputs/burrow/burrow_test.go @@ -2,7 +2,6 @@ package burrow import ( "fmt" - "io/ioutil" "net/http" "net/http/httptest" "os" @@ -28,7 +27,7 @@ func getResponseJSON(requestURI string) ([]byte, int) { } // respond with file - b, _ := ioutil.ReadFile(jsonFile) + b, _ := os.ReadFile(jsonFile) return b, code } diff --git a/plugins/inputs/cassandra/cassandra.go b/plugins/inputs/cassandra/cassandra.go index 4a52ef2979b7d..d1c23caadc68a 100644 --- a/plugins/inputs/cassandra/cassandra.go +++ b/plugins/inputs/cassandra/cassandra.go @@ -4,7 +4,7 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "strings" @@ -217,7 +217,7 @@ func (c *Cassandra) getAttr(requestURL *url.URL) (map[string]interface{}, error) } // read body - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } diff --git a/plugins/inputs/cassandra/cassandra_test.go b/plugins/inputs/cassandra/cassandra_test.go index 325c267d9274b..f167f50e7187f 100644 --- a/plugins/inputs/cassandra/cassandra_test.go +++ b/plugins/inputs/cassandra/cassandra_test.go @@ -2,7 +2,7 @@ package cassandra import ( _ "fmt" - "io/ioutil" + "io" "net/http" "strings" "testing" @@ -109,7 +109,7 @@ type jolokiaClientStub struct { func (c jolokiaClientStub) MakeRequest(_ *http.Request) 
(*http.Response, error) { resp := http.Response{} resp.StatusCode = c.statusCode - resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody)) + resp.Body = io.NopCloser(strings.NewReader(c.responseBody)) return &resp, nil } diff --git a/plugins/inputs/ceph/ceph.go b/plugins/inputs/ceph/ceph.go index 7baa28213ac7f..efd61d56322a7 100644 --- a/plugins/inputs/ceph/ceph.go +++ b/plugins/inputs/ceph/ceph.go @@ -4,7 +4,7 @@ import ( "bytes" "encoding/json" "fmt" - "io/ioutil" + "os" "os/exec" "path/filepath" "strings" @@ -206,7 +206,7 @@ var perfDump = func(binary string, socket *socket) (string, error) { } var findSockets = func(c *Ceph) ([]*socket, error) { - listing, err := ioutil.ReadDir(c.SocketDir) + listing, err := os.ReadDir(c.SocketDir) if err != nil { return []*socket{}, fmt.Errorf("Failed to read socket directory '%s': %v", c.SocketDir, err) } diff --git a/plugins/inputs/ceph/ceph_test.go b/plugins/inputs/ceph/ceph_test.go index a61838bc6a4e0..7915d6dd695f4 100644 --- a/plugins/inputs/ceph/ceph_test.go +++ b/plugins/inputs/ceph/ceph_test.go @@ -2,7 +2,6 @@ package ceph import ( "fmt" - "io/ioutil" "os" "path/filepath" "strconv" @@ -113,7 +112,7 @@ func TestGather(t *testing.T) { } func TestFindSockets(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "socktest") + tmpdir, err := os.MkdirTemp("", "socktest") require.NoError(t, err) defer func() { err := os.Remove(tmpdir) @@ -189,7 +188,7 @@ func createTestFiles(dir string, st *SockTest) error { writeFile := func(prefix string, i int) error { f := sockFile(prefix, i) fpath := filepath.Join(dir, f) - return ioutil.WriteFile(fpath, []byte(""), 0777) + return os.WriteFile(fpath, []byte(""), 0777) } return tstFileApply(st, writeFile) } diff --git a/plugins/inputs/cgroup/cgroup_linux.go b/plugins/inputs/cgroup/cgroup_linux.go index d1eda6e7a3b07..b892f528c234f 100644 --- a/plugins/inputs/cgroup/cgroup_linux.go +++ b/plugins/inputs/cgroup/cgroup_linux.go @@ -5,7 +5,6 @@ package cgroup import ( "fmt" - 
"io/ioutil" "os" "path" "path/filepath" @@ -46,7 +45,7 @@ func (g *CGroup) gatherDir(acc telegraf.Accumulator, dir string) error { return file.err } - raw, err := ioutil.ReadFile(file.path) + raw, err := os.ReadFile(file.path) if err != nil { return err } diff --git a/plugins/inputs/clickhouse/clickhouse.go b/plugins/inputs/clickhouse/clickhouse.go index e7c5991676211..bdd4cf4730fbc 100644 --- a/plugins/inputs/clickhouse/clickhouse.go +++ b/plugins/inputs/clickhouse/clickhouse.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net" "net/http" "net/url" @@ -590,7 +589,7 @@ func (ch *ClickHouse) execQuery(address *url.URL, query string, i interface{}) e } defer func() { _ = resp.Body.Close() }() if resp.StatusCode >= 300 { - body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(resp.Body, 200)) return &clickhouseError{ StatusCode: resp.StatusCode, body: body, @@ -606,7 +605,7 @@ func (ch *ClickHouse) execQuery(address *url.URL, query string, i interface{}) e return err } - if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil { + if _, err := io.Copy(io.Discard, resp.Body); err != nil { return err } return nil diff --git a/plugins/inputs/cloud_pubsub_push/pubsub_push.go b/plugins/inputs/cloud_pubsub_push/pubsub_push.go index ef43a3d5eb161..48329e1cd362e 100644 --- a/plugins/inputs/cloud_pubsub_push/pubsub_push.go +++ b/plugins/inputs/cloud_pubsub_push/pubsub_push.go @@ -5,7 +5,7 @@ import ( "crypto/subtle" "encoding/base64" "encoding/json" - "io/ioutil" + "io" "net/http" "sync" "time" @@ -222,7 +222,7 @@ func (p *PubSubPush) serveWrite(res http.ResponseWriter, req *http.Request) { } body := http.MaxBytesReader(res, req.Body, int64(p.MaxBodySize)) - bytes, err := ioutil.ReadAll(body) + bytes, err := io.ReadAll(body) if err != nil { res.WriteHeader(http.StatusRequestEntityTooLarge) return diff --git a/plugins/inputs/conntrack/conntrack.go b/plugins/inputs/conntrack/conntrack.go index 
f1b04fb0d965a..d644f7c188fc5 100644 --- a/plugins/inputs/conntrack/conntrack.go +++ b/plugins/inputs/conntrack/conntrack.go @@ -5,14 +5,14 @@ package conntrack import ( "fmt" - "io/ioutil" "os" "strconv" "strings" + "path/filepath" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" - "path/filepath" ) type Conntrack struct { @@ -91,7 +91,7 @@ func (c *Conntrack) Gather(acc telegraf.Accumulator) error { continue } - contents, err := ioutil.ReadFile(fName) + contents, err := os.ReadFile(fName) if err != nil { acc.AddError(fmt.Errorf("E! failed to read file '%s': %v", fName, err)) continue diff --git a/plugins/inputs/conntrack/conntrack_test.go b/plugins/inputs/conntrack/conntrack_test.go index 50f56d831791e..cb33caec2e330 100644 --- a/plugins/inputs/conntrack/conntrack_test.go +++ b/plugins/inputs/conntrack/conntrack_test.go @@ -4,7 +4,6 @@ package conntrack import ( - "io/ioutil" "os" "path" "strconv" @@ -35,11 +34,11 @@ func TestNoFilesFound(t *testing.T) { func TestDefaultsUsed(t *testing.T) { defer restoreDflts(dfltFiles, dfltDirs) - tmpdir, err := ioutil.TempDir("", "tmp1") + tmpdir, err := os.MkdirTemp("", "tmp1") require.NoError(t, err) defer os.Remove(tmpdir) - tmpFile, err := ioutil.TempFile(tmpdir, "ip_conntrack_count") + tmpFile, err := os.CreateTemp(tmpdir, "ip_conntrack_count") require.NoError(t, err) defer os.Remove(tmpFile.Name()) @@ -48,7 +47,7 @@ func TestDefaultsUsed(t *testing.T) { dfltFiles = []string{fname} count := 1234321 - require.NoError(t, ioutil.WriteFile(tmpFile.Name(), []byte(strconv.Itoa(count)), 0660)) + require.NoError(t, os.WriteFile(tmpFile.Name(), []byte(strconv.Itoa(count)), 0660)) c := &Conntrack{} acc := &testutil.Accumulator{} @@ -59,13 +58,13 @@ func TestDefaultsUsed(t *testing.T) { func TestConfigsUsed(t *testing.T) { defer restoreDflts(dfltFiles, dfltDirs) - tmpdir, err := ioutil.TempDir("", "tmp1") + tmpdir, err := os.MkdirTemp("", "tmp1") require.NoError(t, err) defer os.Remove(tmpdir) - 
cntFile, err := ioutil.TempFile(tmpdir, "nf_conntrack_count") + cntFile, err := os.CreateTemp(tmpdir, "nf_conntrack_count") require.NoError(t, err) - maxFile, err := ioutil.TempFile(tmpdir, "nf_conntrack_max") + maxFile, err := os.CreateTemp(tmpdir, "nf_conntrack_max") require.NoError(t, err) defer os.Remove(cntFile.Name()) defer os.Remove(maxFile.Name()) @@ -77,8 +76,8 @@ func TestConfigsUsed(t *testing.T) { count := 1234321 max := 9999999 - require.NoError(t, ioutil.WriteFile(cntFile.Name(), []byte(strconv.Itoa(count)), 0660)) - require.NoError(t, ioutil.WriteFile(maxFile.Name(), []byte(strconv.Itoa(max)), 0660)) + require.NoError(t, os.WriteFile(cntFile.Name(), []byte(strconv.Itoa(count)), 0660)) + require.NoError(t, os.WriteFile(maxFile.Name(), []byte(strconv.Itoa(max)), 0660)) c := &Conntrack{} acc := &testutil.Accumulator{} diff --git a/plugins/inputs/dcos/creds.go b/plugins/inputs/dcos/creds.go index 2fd5f078e46e5..328ce394a4cf6 100644 --- a/plugins/inputs/dcos/creds.go +++ b/plugins/inputs/dcos/creds.go @@ -4,7 +4,7 @@ import ( "context" "crypto/rsa" "fmt" - "io/ioutil" + "os" "strings" "time" "unicode/utf8" @@ -48,7 +48,7 @@ func (c *ServiceAccount) IsExpired() bool { } func (c *TokenCreds) Token(_ context.Context, _ Client) (string, error) { - octets, err := ioutil.ReadFile(c.Path) + octets, err := os.ReadFile(c.Path) if err != nil { return "", fmt.Errorf("error reading token file %q: %s", c.Path, err) } diff --git a/plugins/inputs/dcos/dcos.go b/plugins/inputs/dcos/dcos.go index 35822f30b074f..dd8f22f7292f5 100644 --- a/plugins/inputs/dcos/dcos.go +++ b/plugins/inputs/dcos/dcos.go @@ -2,8 +2,8 @@ package dcos import ( "context" - "io/ioutil" "net/url" + "os" "sort" "strings" "sync" @@ -370,7 +370,7 @@ func (d *DCOS) createClient() (Client, error) { func (d *DCOS) createCredentials() (Credentials, error) { if d.ServiceAccountID != "" && d.ServiceAccountPrivateKey != "" { - bs, err := ioutil.ReadFile(d.ServiceAccountPrivateKey) + bs, err := 
os.ReadFile(d.ServiceAccountPrivateKey) if err != nil { return nil, err } diff --git a/plugins/inputs/directory_monitor/directory_monitor.go b/plugins/inputs/directory_monitor/directory_monitor.go index 45acd1c062ba9..a58c039422757 100644 --- a/plugins/inputs/directory_monitor/directory_monitor.go +++ b/plugins/inputs/directory_monitor/directory_monitor.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "path/filepath" "regexp" @@ -108,7 +107,7 @@ func (monitor *DirectoryMonitor) Description() string { func (monitor *DirectoryMonitor) Gather(_ telegraf.Accumulator) error { // Get all files sitting in the directory. - files, err := ioutil.ReadDir(monitor.Directory) + files, err := os.ReadDir(monitor.Directory) if err != nil { return fmt.Errorf("unable to monitor the targeted directory: %w", err) } @@ -183,7 +182,7 @@ func (monitor *DirectoryMonitor) Monitor() { } } -func (monitor *DirectoryMonitor) processFile(file os.FileInfo) { +func (monitor *DirectoryMonitor) processFile(file os.DirEntry) { if file.IsDir() { return } diff --git a/plugins/inputs/directory_monitor/directory_monitor_test.go b/plugins/inputs/directory_monitor/directory_monitor_test.go index 2ad504637c6c2..7cda5f2d7b639 100644 --- a/plugins/inputs/directory_monitor/directory_monitor_test.go +++ b/plugins/inputs/directory_monitor/directory_monitor_test.go @@ -3,7 +3,6 @@ package directory_monitor import ( "bytes" "compress/gzip" - "io/ioutil" "os" "path/filepath" "testing" @@ -20,9 +19,9 @@ func TestCSVGZImport(t *testing.T) { testCsvGzFile := "test.csv.gz" // Establish process directory and finished directory. 
- finishedDirectory, err := ioutil.TempDir("", "finished") + finishedDirectory, err := os.MkdirTemp("", "finished") require.NoError(t, err) - processDirectory, err := ioutil.TempDir("", "test") + processDirectory, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(processDirectory) defer os.RemoveAll(finishedDirectory) @@ -62,7 +61,7 @@ func TestCSVGZImport(t *testing.T) { require.NoError(t, err) err = w.Close() require.NoError(t, err) - err = ioutil.WriteFile(filepath.Join(processDirectory, testCsvGzFile), b.Bytes(), 0666) + err = os.WriteFile(filepath.Join(processDirectory, testCsvGzFile), b.Bytes(), 0666) require.NoError(t, err) // Start plugin before adding file. @@ -89,9 +88,9 @@ func TestMultipleJSONFileImports(t *testing.T) { testJSONFile := "test.json" // Establish process directory and finished directory. - finishedDirectory, err := ioutil.TempDir("", "finished") + finishedDirectory, err := os.MkdirTemp("", "finished") require.NoError(t, err) - processDirectory, err := ioutil.TempDir("", "test") + processDirectory, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(processDirectory) defer os.RemoveAll(finishedDirectory) diff --git a/plugins/inputs/diskio/diskio_linux_test.go b/plugins/inputs/diskio/diskio_linux_test.go index 1a97aabf40db5..8a76e230cbb98 100644 --- a/plugins/inputs/diskio/diskio_linux_test.go +++ b/plugins/inputs/diskio/diskio_linux_test.go @@ -4,7 +4,6 @@ package diskio import ( - "io/ioutil" "os" "testing" @@ -20,7 +19,7 @@ S:foo/bar/devlink1 // setupNullDisk sets up fake udev info as if /dev/null were a disk. 
func setupNullDisk(t *testing.T, s *DiskIO, devName string) func() { - td, err := ioutil.TempFile("", ".telegraf.DiskInfoTest") + td, err := os.CreateTemp("", ".telegraf.DiskInfoTest") require.NoError(t, err) if s.infoCache == nil { diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index 599adae409e99..a84a6047b30aa 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -3,7 +3,7 @@ package docker import ( "context" "crypto/tls" - "io/ioutil" + "io" "reflect" "sort" "strings" @@ -1060,7 +1060,7 @@ func TestContainerName(t *testing.T) { } client.ContainerStatsF = func(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) { return types.ContainerStats{ - Body: ioutil.NopCloser(strings.NewReader(`{"name": "logspout"}`)), + Body: io.NopCloser(strings.NewReader(`{"name": "logspout"}`)), }, nil } return &client, nil @@ -1080,7 +1080,7 @@ func TestContainerName(t *testing.T) { } client.ContainerStatsF = func(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) { return types.ContainerStats{ - Body: ioutil.NopCloser(strings.NewReader(`{}`)), + Body: io.NopCloser(strings.NewReader(`{}`)), }, nil } return &client, nil diff --git a/plugins/inputs/docker/docker_testdata.go b/plugins/inputs/docker/docker_testdata.go index bde0bd312c788..826f34f6703d4 100644 --- a/plugins/inputs/docker/docker_testdata.go +++ b/plugins/inputs/docker/docker_testdata.go @@ -2,7 +2,7 @@ package docker import ( "fmt" - "io/ioutil" + "io" "strings" "time" @@ -344,7 +344,7 @@ func containerStats(s string) types.ContainerStats { }, "read": "2016-02-24T11:42:27.472459608-05:00" }`, name) - stat.Body = ioutil.NopCloser(strings.NewReader(jsonStat)) + stat.Body = io.NopCloser(strings.NewReader(jsonStat)) return stat } @@ -488,7 +488,7 @@ func containerStatsWindows() types.ContainerStats { }, "name":"/gt_test_iis", }` - stat.Body = 
ioutil.NopCloser(strings.NewReader(jsonStat)) + stat.Body = io.NopCloser(strings.NewReader(jsonStat)) return stat } diff --git a/plugins/inputs/ecs/client.go b/plugins/inputs/ecs/client.go index ac7ed2e1b09ef..b5521c5ea3f3a 100644 --- a/plugins/inputs/ecs/client.go +++ b/plugins/inputs/ecs/client.go @@ -3,7 +3,6 @@ package ecs import ( "fmt" "io" - "io/ioutil" "net/http" "net/url" "time" @@ -113,7 +112,7 @@ func (c *EcsClient) Task() (*Task, error) { if resp.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. - body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(resp.Body, 200)) return nil, fmt.Errorf("%s returned HTTP status %s: %q", c.taskURL, resp.Status, body) } @@ -137,7 +136,7 @@ func (c *EcsClient) ContainerStats() (map[string]types.StatsJSON, error) { if resp.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. 
- body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(resp.Body, 200)) return nil, fmt.Errorf("%s returned HTTP status %s: %q", c.statsURL, resp.Status, body) } diff --git a/plugins/inputs/ecs/client_test.go b/plugins/inputs/ecs/client_test.go index 2f37ca0cfa456..7e9d7e393346f 100644 --- a/plugins/inputs/ecs/client_test.go +++ b/plugins/inputs/ecs/client_test.go @@ -3,7 +3,7 @@ package ecs import ( "bytes" "errors" - "io/ioutil" + "io" "net/http" "net/url" "os" @@ -108,7 +108,7 @@ func TestEcsClient_Task(t *testing.T) { do: func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusOK, - Body: ioutil.NopCloser(rc), + Body: io.NopCloser(rc), }, nil }, }, @@ -129,7 +129,7 @@ func TestEcsClient_Task(t *testing.T) { do: func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusInternalServerError, - Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))), + Body: io.NopCloser(bytes.NewReader([]byte("foo"))), }, nil }, }, @@ -141,7 +141,7 @@ func TestEcsClient_Task(t *testing.T) { do: func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))), + Body: io.NopCloser(bytes.NewReader([]byte("foo"))), }, nil }, }, @@ -179,7 +179,7 @@ func TestEcsClient_ContainerStats(t *testing.T) { do: func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusOK, - Body: ioutil.NopCloser(rc), + Body: io.NopCloser(rc), }, nil }, }, @@ -201,7 +201,7 @@ func TestEcsClient_ContainerStats(t *testing.T) { do: func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))), + Body: io.NopCloser(bytes.NewReader([]byte("foo"))), }, nil }, }, @@ -214,7 +214,7 @@ func TestEcsClient_ContainerStats(t *testing.T) { do: func(req *http.Request) (*http.Response, 
error) { return &http.Response{ StatusCode: http.StatusInternalServerError, - Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))), + Body: io.NopCloser(bytes.NewReader([]byte("foo"))), }, nil }, }, diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go index 0bd4ce677cd9e..24142ba38c32e 100644 --- a/plugins/inputs/elasticsearch/elasticsearch.go +++ b/plugins/inputs/elasticsearch/elasticsearch.go @@ -3,7 +3,7 @@ package elasticsearch import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "regexp" "sort" @@ -702,7 +702,7 @@ func (e *Elasticsearch) getCatMaster(url string) (string, error) { // future calls. return "", fmt.Errorf("elasticsearch: Unable to retrieve master node information. API responded with status-code %d, expected %d", r.StatusCode, http.StatusOK) } - response, err := ioutil.ReadAll(r.Body) + response, err := io.ReadAll(r.Body) if err != nil { return "", err diff --git a/plugins/inputs/elasticsearch/elasticsearch_test.go b/plugins/inputs/elasticsearch/elasticsearch_test.go index 8248d063b6883..1ed61e731ce1f 100644 --- a/plugins/inputs/elasticsearch/elasticsearch_test.go +++ b/plugins/inputs/elasticsearch/elasticsearch_test.go @@ -1,7 +1,7 @@ package elasticsearch import ( - "io/ioutil" + "io" "net/http" "strings" "testing" @@ -44,7 +44,7 @@ func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) { StatusCode: t.statusCode, } res.Header.Set("Content-Type", "application/json") - res.Body = ioutil.NopCloser(strings.NewReader(t.body)) + res.Body = io.NopCloser(strings.NewReader(t.body)) return res, nil } diff --git a/plugins/inputs/execd/shim/goshim.go b/plugins/inputs/execd/shim/goshim.go index 075d2cf55ab62..cfb54e3ae0708 100644 --- a/plugins/inputs/execd/shim/goshim.go +++ b/plugins/inputs/execd/shim/goshim.go @@ -8,7 +8,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "os/signal" "strings" @@ -274,7 +273,7 @@ func LoadConfig(filePath *string) ([]telegraf.Input, 
error) { return DefaultImportedPlugins() } - b, err := ioutil.ReadFile(*filePath) + b, err := os.ReadFile(*filePath) if err != nil { return nil, err } diff --git a/plugins/inputs/file/file.go b/plugins/inputs/file/file.go index e431bc6df9f15..22af282dbde0a 100644 --- a/plugins/inputs/file/file.go +++ b/plugins/inputs/file/file.go @@ -2,7 +2,7 @@ package file import ( "fmt" - "io/ioutil" + "io" "os" "path/filepath" @@ -115,7 +115,7 @@ func (f *File) readMetric(filename string) ([]telegraf.Metric, error) { defer file.Close() r, _ := utfbom.Skip(f.decoder.Reader(file)) - fileContents, err := ioutil.ReadAll(r) + fileContents, err := io.ReadAll(r) if err != nil { return nil, fmt.Errorf("E! Error file: %v could not be read, %s", filename, err) } diff --git a/plugins/inputs/fluentd/fluentd.go b/plugins/inputs/fluentd/fluentd.go index 03f46c67ce515..9ebd1682a56b7 100644 --- a/plugins/inputs/fluentd/fluentd.go +++ b/plugins/inputs/fluentd/fluentd.go @@ -3,7 +3,7 @@ package fluentd import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "time" @@ -104,7 +104,7 @@ func (h *Fluentd) Gather(acc telegraf.Accumulator) error { defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return fmt.Errorf("unable to read the HTTP body \"%s\": %v", string(body), err) diff --git a/plugins/inputs/graylog/graylog.go b/plugins/inputs/graylog/graylog.go index 9b73991eb8227..d522f5a49dfea 100644 --- a/plugins/inputs/graylog/graylog.go +++ b/plugins/inputs/graylog/graylog.go @@ -5,7 +5,7 @@ import ( "encoding/base64" "encoding/json" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -264,7 +264,7 @@ func (h *GrayLog) sendRequest(serverURL string) (string, float64, error) { defer resp.Body.Close() responseTime := time.Since(start).Seconds() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return string(body), responseTime, err } diff --git 
a/plugins/inputs/graylog/graylog_test.go b/plugins/inputs/graylog/graylog_test.go index f8008f1d94c66..5739969e3df01 100644 --- a/plugins/inputs/graylog/graylog_test.go +++ b/plugins/inputs/graylog/graylog_test.go @@ -1,7 +1,7 @@ package graylog import ( - "io/ioutil" + "io" "net/http" "strings" "testing" @@ -115,7 +115,7 @@ func (c *mockHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) resp.StatusCode = 405 // Method not allowed } - resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody)) + resp.Body = io.NopCloser(strings.NewReader(c.responseBody)) return &resp, nil } diff --git a/plugins/inputs/http/http.go b/plugins/inputs/http/http.go index c61465a54c36f..d7a6ac1213b6f 100644 --- a/plugins/inputs/http/http.go +++ b/plugins/inputs/http/http.go @@ -4,8 +4,8 @@ import ( "context" "fmt" "io" - "io/ioutil" "net/http" + "os" "strings" "sync" @@ -180,7 +180,7 @@ func (h *HTTP) gatherURL( } if h.BearerToken != "" { - token, err := ioutil.ReadFile(h.BearerToken) + token, err := os.ReadFile(h.BearerToken) if err != nil { return err } @@ -225,7 +225,7 @@ func (h *HTTP) gatherURL( h.SuccessStatusCodes) } - b, err := ioutil.ReadAll(resp.Body) + b, err := io.ReadAll(resp.Body) if err != nil { return err } @@ -254,7 +254,7 @@ func makeRequestBodyReader(contentEncoding, body string) (io.ReadCloser, error) } return rc, nil } - return ioutil.NopCloser(reader), nil + return io.NopCloser(reader), nil } func init() { diff --git a/plugins/inputs/http/http_test.go b/plugins/inputs/http/http_test.go index 02351effc71b9..da9fed2251514 100644 --- a/plugins/inputs/http/http_test.go +++ b/plugins/inputs/http/http_test.go @@ -3,7 +3,7 @@ package http_test import ( "compress/gzip" "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "net/url" @@ -183,7 +183,7 @@ func TestBodyAndContentEncoding(t *testing.T) { URLs: []string{url}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { - body, err := ioutil.ReadAll(r.Body) + body, err 
:= io.ReadAll(r.Body) require.NoError(t, err) require.Equal(t, []byte(""), body) w.WriteHeader(http.StatusOK) @@ -197,7 +197,7 @@ func TestBodyAndContentEncoding(t *testing.T) { Body: "test", }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Equal(t, []byte("test"), body) w.WriteHeader(http.StatusOK) @@ -211,7 +211,7 @@ func TestBodyAndContentEncoding(t *testing.T) { Body: "test", }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Equal(t, []byte("test"), body) w.WriteHeader(http.StatusOK) @@ -230,7 +230,7 @@ func TestBodyAndContentEncoding(t *testing.T) { gr, err := gzip.NewReader(r.Body) require.NoError(t, err) - body, err := ioutil.ReadAll(gr) + body, err := io.ReadAll(gr) require.NoError(t, err) require.Equal(t, []byte("test"), body) w.WriteHeader(http.StatusOK) diff --git a/plugins/inputs/http_listener_v2/http_listener_v2.go b/plugins/inputs/http_listener_v2/http_listener_v2.go index 5b511de57fb54..d2a2e5f35214e 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2.go @@ -4,7 +4,7 @@ import ( "compress/gzip" "crypto/subtle" "crypto/tls" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -292,7 +292,7 @@ func (h *HTTPListenerV2) collectBody(res http.ResponseWriter, req *http.Request) } defer r.Close() maxReader := http.MaxBytesReader(res, r, int64(h.MaxBodySize)) - bytes, err := ioutil.ReadAll(maxReader) + bytes, err := io.ReadAll(maxReader) if err != nil { if err := tooLarge(res); err != nil { h.Log.Debugf("error in too-large: %v", err) @@ -302,7 +302,7 @@ func (h *HTTPListenerV2) collectBody(res http.ResponseWriter, req *http.Request) return bytes, true case "snappy": defer req.Body.Close() - bytes, err := 
ioutil.ReadAll(req.Body) + bytes, err := io.ReadAll(req.Body) if err != nil { h.Log.Debug(err.Error()) if err := badRequest(res); err != nil { @@ -322,7 +322,7 @@ func (h *HTTPListenerV2) collectBody(res http.ResponseWriter, req *http.Request) return bytes, true default: defer req.Body.Close() - bytes, err := ioutil.ReadAll(req.Body) + bytes, err := io.ReadAll(req.Body) if err != nil { h.Log.Debug(err.Error()) if err := badRequest(res); err != nil { diff --git a/plugins/inputs/http_listener_v2/http_listener_v2_test.go b/plugins/inputs/http_listener_v2/http_listener_v2_test.go index 5daaf2785ffe3..da70f443998e1 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2_test.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2_test.go @@ -4,9 +4,9 @@ import ( "bytes" "crypto/tls" "crypto/x509" - "io/ioutil" "net/http" "net/url" + "os" "runtime" "strconv" "sync" @@ -361,7 +361,7 @@ func TestWriteHTTPGzippedData(t *testing.T) { require.NoError(t, listener.Start(acc)) defer listener.Stop() - data, err := ioutil.ReadFile("./testdata/testmsgs.gz") + data, err := os.ReadFile("./testdata/testmsgs.gz") require.NoError(t, err) req, err := http.NewRequest("POST", createURL(listener, "http", "/write", ""), bytes.NewBuffer(data)) diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go index d8a4e0e1438cd..799f664d1e7b0 100644 --- a/plugins/inputs/http_response/http_response.go +++ b/plugins/inputs/http_response/http_response.go @@ -4,10 +4,10 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net" "net/http" "net/url" + "os" "regexp" "strconv" "strings" @@ -277,7 +277,7 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string] } if h.BearerToken != "" { - token, err := ioutil.ReadFile(h.BearerToken) + token, err := os.ReadFile(h.BearerToken) if err != nil { return nil, nil, err } @@ -339,7 +339,7 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string] if 
h.ResponseBodyMaxSize == 0 { h.ResponseBodyMaxSize = config.Size(defaultResponseBodyMaxSize) } - bodyBytes, err := ioutil.ReadAll(io.LimitReader(resp.Body, int64(h.ResponseBodyMaxSize)+1)) + bodyBytes, err := io.ReadAll(io.LimitReader(resp.Body, int64(h.ResponseBodyMaxSize)+1)) // Check first if the response body size exceeds the limit. if err == nil && int64(len(bodyBytes)) > int64(h.ResponseBodyMaxSize) { h.setBodyReadError("The body of the HTTP Response is too large", bodyBytes, fields, tags) diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go index 40917bba1bc39..5d109d0a35439 100644 --- a/plugins/inputs/http_response/http_response_test.go +++ b/plugins/inputs/http_response/http_response_test.go @@ -8,7 +8,7 @@ package http_response import ( "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/http/httptest" @@ -123,7 +123,7 @@ func setUpTestMux() http.Handler { fmt.Fprintf(w, "used post correctly!") }) mux.HandleFunc("/musthaveabody", func(w http.ResponseWriter, req *http.Request) { - body, err := ioutil.ReadAll(req.Body) + body, err := io.ReadAll(req.Body) //nolint:errcheck,revive req.Body.Close() if err != nil { diff --git a/plugins/inputs/httpjson/httpjson.go b/plugins/inputs/httpjson/httpjson.go index f5d97b90989c0..10a4cb0c17643 100644 --- a/plugins/inputs/httpjson/httpjson.go +++ b/plugins/inputs/httpjson/httpjson.go @@ -3,7 +3,7 @@ package httpjson import ( "bytes" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "strings" @@ -263,7 +263,7 @@ func (h *HTTPJSON) sendRequest(serverURL string) (string, float64, error) { defer resp.Body.Close() responseTime := time.Since(start).Seconds() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return string(body), responseTime, err } diff --git a/plugins/inputs/httpjson/httpjson_test.go b/plugins/inputs/httpjson/httpjson_test.go index 9f6292cba722d..b203238a94037 100644 --- 
a/plugins/inputs/httpjson/httpjson_test.go +++ b/plugins/inputs/httpjson/httpjson_test.go @@ -2,7 +2,7 @@ package httpjson import ( "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "strings" @@ -143,7 +143,7 @@ func (c *mockHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) resp.StatusCode = 405 // Method not allowed } - resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody)) + resp.Body = io.NopCloser(strings.NewReader(c.responseBody)) return &resp, nil } @@ -377,7 +377,7 @@ func TestHttpJsonPOST(t *testing.T) { "api_key": "mykey", } ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) assert.NoError(t, err) assert.Equal(t, "api_key=mykey", string(body)) w.WriteHeader(http.StatusOK) diff --git a/plugins/inputs/influxdb_listener/influxdb_listener_test.go b/plugins/inputs/influxdb_listener/influxdb_listener_test.go index 8a082a855a7f8..6b88907f95801 100644 --- a/plugins/inputs/influxdb_listener/influxdb_listener_test.go +++ b/plugins/inputs/influxdb_listener/influxdb_listener_test.go @@ -4,9 +4,9 @@ import ( "bytes" "crypto/tls" "crypto/x509" - "io/ioutil" "net/http" "net/url" + "os" "runtime" "strconv" "sync" @@ -406,7 +406,7 @@ func TestWriteGzippedData(t *testing.T) { require.NoError(t, listener.Start(acc)) defer listener.Stop() - data, err := ioutil.ReadFile("./testdata/testmsgs.gz") + data, err := os.ReadFile("./testdata/testmsgs.gz") require.NoError(t, err) req, err := http.NewRequest("POST", createURL(listener, "http", "/write", ""), bytes.NewBuffer(data)) diff --git a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go index 64907d12a52dc..4df2f7dc86a5e 100644 --- a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go +++ b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go @@ -6,7 +6,7 @@ import ( "crypto/tls" "encoding/json" "fmt" - 
"io/ioutil" + "io" "net" "net/http" "time" @@ -256,7 +256,7 @@ func (h *InfluxDBV2Listener) handleWrite() http.HandlerFunc { var readErr error var bytes []byte //body = http.MaxBytesReader(res, req.Body, 1000000) //p.MaxBodySize.Size) - bytes, readErr = ioutil.ReadAll(body) + bytes, readErr = io.ReadAll(body) if readErr != nil { h.Log.Debugf("Error parsing the request body: %v", readErr.Error()) if err := badRequest(res, InternalError, readErr.Error()); err != nil { diff --git a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go index be99c93f51a8a..055dfc395ba7b 100644 --- a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go +++ b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go @@ -5,9 +5,10 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" + "io" "net/http" "net/url" + "os" "runtime" "strconv" "sync" @@ -363,7 +364,7 @@ func TestWriteGzippedData(t *testing.T) { require.NoError(t, listener.Start(acc)) defer listener.Stop() - data, err := ioutil.ReadFile("./testdata/testmsgs.gz") + data, err := os.ReadFile("./testdata/testmsgs.gz") require.NoError(t, err) req, err := http.NewRequest("POST", createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), bytes.NewBuffer(data)) @@ -485,7 +486,7 @@ func TestReady(t *testing.T) { resp, err := http.Get(createURL(listener, "http", "/api/v2/ready", "")) require.NoError(t, err) require.Equal(t, "application/json", resp.Header["Content-Type"][0]) - bodyBytes, err := ioutil.ReadAll(resp.Body) + bodyBytes, err := io.ReadAll(resp.Body) require.NoError(t, err) require.Contains(t, string(bodyBytes), "\"status\":\"ready\"") require.NoError(t, resp.Body.Close()) diff --git a/plugins/inputs/intel_powerstat/file.go b/plugins/inputs/intel_powerstat/file.go index a07dd57e16a57..c69dea89f4e26 100644 --- a/plugins/inputs/intel_powerstat/file.go +++ b/plugins/inputs/intel_powerstat/file.go @@ -8,7 +8,6 @@ import ( 
"encoding/binary" "fmt" "io" - "io/ioutil" "os" "path/filepath" "regexp" @@ -107,7 +106,7 @@ func (fs *fileServiceImpl) getStringsMatchingPatternOnPath(path string) ([]strin // readFile reads file on path and return string content. func (fs *fileServiceImpl) readFile(path string) ([]byte, error) { - out, err := ioutil.ReadFile(path) + out, err := os.ReadFile(path) if err != nil { return make([]byte, 0), err } @@ -116,7 +115,7 @@ func (fs *fileServiceImpl) readFile(path string) ([]byte, error) { // readFileToFloat64 reads file on path and tries to parse content to float64. func (fs *fileServiceImpl) readFileToFloat64(reader io.Reader) (float64, int64, error) { - read, err := ioutil.ReadAll(reader) + read, err := io.ReadAll(reader) if err != nil { return 0, 0, err } diff --git a/plugins/inputs/jolokia/jolokia.go b/plugins/inputs/jolokia/jolokia.go index 9e4cac511683b..af5e3de283800 100644 --- a/plugins/inputs/jolokia/jolokia.go +++ b/plugins/inputs/jolokia/jolokia.go @@ -4,7 +4,7 @@ import ( "bytes" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "time" @@ -153,7 +153,7 @@ func (j *Jolokia) doRequest(req *http.Request) ([]map[string]interface{}, error) } // read body - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } diff --git a/plugins/inputs/jolokia/jolokia_test.go b/plugins/inputs/jolokia/jolokia_test.go index 5c1bc50aa2ae7..e91e9a1087fda 100644 --- a/plugins/inputs/jolokia/jolokia_test.go +++ b/plugins/inputs/jolokia/jolokia_test.go @@ -2,7 +2,7 @@ package jolokia import ( _ "fmt" - "io/ioutil" + "io" "net/http" "strings" "testing" @@ -116,7 +116,7 @@ type jolokiaClientStub struct { func (c jolokiaClientStub) MakeRequest(_ *http.Request) (*http.Response, error) { resp := http.Response{} resp.StatusCode = c.statusCode - resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody)) + resp.Body = io.NopCloser(strings.NewReader(c.responseBody)) return &resp, nil } diff --git 
a/plugins/inputs/jolokia2/client.go b/plugins/inputs/jolokia2/client.go index 41ebd4f8af872..789450e3a1016 100644 --- a/plugins/inputs/jolokia2/client.go +++ b/plugins/inputs/jolokia2/client.go @@ -4,7 +4,7 @@ import ( "bytes" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "path" @@ -149,7 +149,7 @@ func (c *Client) read(requests []ReadRequest) ([]ReadResponse, error) { c.URL, resp.StatusCode, http.StatusText(resp.StatusCode), http.StatusOK, http.StatusText(http.StatusOK)) } - responseBody, err := ioutil.ReadAll(resp.Body) + responseBody, err := io.ReadAll(resp.Body) if err != nil { return nil, err } diff --git a/plugins/inputs/jolokia2/client_test.go b/plugins/inputs/jolokia2/client_test.go index 7ec65d27a0ebf..a1bd5f4a2e141 100644 --- a/plugins/inputs/jolokia2/client_test.go +++ b/plugins/inputs/jolokia2/client_test.go @@ -3,7 +3,7 @@ package jolokia2 import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "testing" @@ -20,7 +20,7 @@ func TestJolokia2_ClientAuthRequest(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { username, password, _ = r.BasicAuth() - body, _ := ioutil.ReadAll(r.Body) + body, _ := io.ReadAll(r.Body) require.NoError(t, json.Unmarshal(body, &requests)) w.WriteHeader(http.StatusOK) @@ -56,7 +56,7 @@ func TestJolokia2_ClientProxyAuthRequest(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { username, password, _ = r.BasicAuth() - body, _ := ioutil.ReadAll(r.Body) + body, _ := io.ReadAll(r.Body) require.NoError(t, json.Unmarshal(body, &requests)) w.WriteHeader(http.StatusOK) _, err := fmt.Fprintf(w, "[]") diff --git a/plugins/inputs/kernel/kernel.go b/plugins/inputs/kernel/kernel.go index 22311e9a0f12d..c16c68bf44bd1 100644 --- a/plugins/inputs/kernel/kernel.go +++ b/plugins/inputs/kernel/kernel.go @@ -6,7 +6,6 @@ package kernel import ( "bytes" "fmt" - "io/ioutil" "os" "strconv" 
"strings" @@ -41,7 +40,7 @@ func (k *Kernel) Gather(acc telegraf.Accumulator) error { return err } - entropyData, err := ioutil.ReadFile(k.entropyStatFile) + entropyData, err := os.ReadFile(k.entropyStatFile) if err != nil { return err } @@ -109,7 +108,7 @@ func (k *Kernel) getProcStat() ([]byte, error) { return nil, err } - data, err := ioutil.ReadFile(k.statFile) + data, err := os.ReadFile(k.statFile) if err != nil { return nil, err } diff --git a/plugins/inputs/kernel/kernel_test.go b/plugins/inputs/kernel/kernel_test.go index 462624c2eb40d..f174017fad7b9 100644 --- a/plugins/inputs/kernel/kernel_test.go +++ b/plugins/inputs/kernel/kernel_test.go @@ -4,7 +4,6 @@ package kernel import ( - "io/ioutil" "os" "testing" @@ -169,7 +168,7 @@ const entropyStatFilePartial = `1024` const entropyStatFileInvalid = `` func makeFakeStatFile(t *testing.T, content []byte) string { - tmpfile, err := ioutil.TempFile("", "kernel_test") + tmpfile, err := os.CreateTemp("", "kernel_test") require.NoError(t, err) _, err = tmpfile.Write(content) diff --git a/plugins/inputs/kernel_vmstat/kernel_vmstat.go b/plugins/inputs/kernel_vmstat/kernel_vmstat.go index 2019e0cbfddb3..95a7a5e32f1e0 100644 --- a/plugins/inputs/kernel_vmstat/kernel_vmstat.go +++ b/plugins/inputs/kernel_vmstat/kernel_vmstat.go @@ -6,7 +6,6 @@ package kernel_vmstat import ( "bytes" "fmt" - "io/ioutil" "os" "strconv" @@ -61,7 +60,7 @@ func (k *KernelVmstat) getProcVmstat() ([]byte, error) { return nil, err } - data, err := ioutil.ReadFile(k.statFile) + data, err := os.ReadFile(k.statFile) if err != nil { return nil, err } diff --git a/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go b/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go index 6bbb9d7b5b12f..6590e3febd19c 100644 --- a/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go +++ b/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go @@ -4,7 +4,6 @@ package kernel_vmstat import ( - "io/ioutil" "os" "testing" @@ -300,7 +299,7 @@ thp_collapse_alloc_failed 102214 
thp_split abcd` func makeFakeVMStatFile(t *testing.T, content []byte) string { - tmpfile, err := ioutil.TempFile("", "kernel_vmstat_test") + tmpfile, err := os.CreateTemp("", "kernel_vmstat_test") require.NoError(t, err) _, err = tmpfile.Write(content) diff --git a/plugins/inputs/kibana/kibana.go b/plugins/inputs/kibana/kibana.go index c94438eb38d4d..55ffa1df845f9 100644 --- a/plugins/inputs/kibana/kibana.go +++ b/plugins/inputs/kibana/kibana.go @@ -4,7 +4,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "strconv" "strings" @@ -253,7 +252,7 @@ func (k *Kibana) gatherJSONData(url string, v interface{}) (host string, err err if response.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. - body, _ := ioutil.ReadAll(io.LimitReader(response.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(response.Body, 200)) return request.Host, fmt.Errorf("%s returned HTTP status %s: %q", url, response.Status, body) } diff --git a/plugins/inputs/kibana/kibana_test.go b/plugins/inputs/kibana/kibana_test.go index 3dfed9edfa9a2..565d9b1c79416 100644 --- a/plugins/inputs/kibana/kibana_test.go +++ b/plugins/inputs/kibana/kibana_test.go @@ -1,7 +1,7 @@ package kibana import ( - "io/ioutil" + "io" "net/http" "strings" "testing" @@ -46,7 +46,7 @@ func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) { StatusCode: t.statusCode, } res.Header.Set("Content-Type", "application/json") - res.Body = ioutil.NopCloser(strings.NewReader(t.body)) + res.Body = io.NopCloser(strings.NewReader(t.body)) return res, nil } diff --git a/plugins/inputs/kinesis_consumer/kinesis_consumer.go b/plugins/inputs/kinesis_consumer/kinesis_consumer.go index 7acd3202c012b..005ccdc43aab2 100644 --- a/plugins/inputs/kinesis_consumer/kinesis_consumer.go +++ b/plugins/inputs/kinesis_consumer/kinesis_consumer.go @@ -6,7 +6,7 @@ import ( "compress/zlib" "context" "fmt" - "io/ioutil" + "io" "math/big" "strings" "sync" @@ 
-349,7 +349,7 @@ func processGzip(data []byte) ([]byte, error) { return nil, err } defer zipData.Close() - return ioutil.ReadAll(zipData) + return io.ReadAll(zipData) } func processZlib(data []byte) ([]byte, error) { @@ -358,7 +358,7 @@ func processZlib(data []byte) ([]byte, error) { return nil, err } defer zlibData.Close() - return ioutil.ReadAll(zlibData) + return io.ReadAll(zlibData) } func processNoOp(data []byte) ([]byte, error) { diff --git a/plugins/inputs/kube_inventory/kube_state.go b/plugins/inputs/kube_inventory/kube_state.go index bcfae4ce8f52f..24db993dd39bb 100644 --- a/plugins/inputs/kube_inventory/kube_state.go +++ b/plugins/inputs/kube_inventory/kube_state.go @@ -3,8 +3,8 @@ package kube_inventory import ( "context" "fmt" - "io/ioutil" "log" + "os" "strconv" "strings" "sync" @@ -101,7 +101,7 @@ func (ki *KubernetesInventory) Init() error { } if ki.BearerToken != "" { - token, err := ioutil.ReadFile(ki.BearerToken) + token, err := os.ReadFile(ki.BearerToken) if err != nil { return err } diff --git a/plugins/inputs/kubernetes/kubernetes.go b/plugins/inputs/kubernetes/kubernetes.go index ab1cf4bfe4afc..8ca636d480cc2 100644 --- a/plugins/inputs/kubernetes/kubernetes.go +++ b/plugins/inputs/kubernetes/kubernetes.go @@ -3,8 +3,8 @@ package kubernetes import ( "encoding/json" "fmt" - "io/ioutil" "net/http" + "os" "strings" "time" @@ -93,7 +93,7 @@ func (k *Kubernetes) Init() error { } if k.BearerToken != "" { - token, err := ioutil.ReadFile(k.BearerToken) + token, err := os.ReadFile(k.BearerToken) if err != nil { return err } diff --git a/plugins/inputs/leofs/leofs_test.go b/plugins/inputs/leofs/leofs_test.go index 513d2f5ed7de7..1e33ddc4c3d38 100644 --- a/plugins/inputs/leofs/leofs_test.go +++ b/plugins/inputs/leofs/leofs_test.go @@ -1,7 +1,6 @@ package leofs import ( - "io/ioutil" "os" "os/exec" "runtime" @@ -132,7 +131,7 @@ func testMain(t *testing.T, code string, endpoint string, serverType ServerType) // Build the fake snmpwalk for test src := 
os.TempDir() + "/test.go" - require.NoError(t, ioutil.WriteFile(src, []byte(code), 0600)) + require.NoError(t, os.WriteFile(src, []byte(code), 0600)) defer os.Remove(src) require.NoError(t, exec.Command("go", "build", "-o", executable, src).Run()) diff --git a/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go index 55cb22292105a..19848b6db0e37 100644 --- a/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go +++ b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go @@ -3,7 +3,6 @@ package linux_sysctl_fs import ( "bytes" "errors" - "io/ioutil" "os" "strconv" @@ -29,7 +28,7 @@ func (sfs SysctlFS) SampleConfig() string { } func (sfs *SysctlFS) gatherList(file string, fields map[string]interface{}, fieldNames ...string) error { - bs, err := ioutil.ReadFile(sfs.path + "/" + file) + bs, err := os.ReadFile(sfs.path + "/" + file) if err != nil { // Ignore non-existing entries if errors.Is(err, os.ErrNotExist) { @@ -58,7 +57,7 @@ func (sfs *SysctlFS) gatherList(file string, fields map[string]interface{}, fiel } func (sfs *SysctlFS) gatherOne(name string, fields map[string]interface{}) error { - bs, err := ioutil.ReadFile(sfs.path + "/" + name) + bs, err := os.ReadFile(sfs.path + "/" + name) if err != nil { // Ignore non-existing entries if errors.Is(err, os.ErrNotExist) { diff --git a/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs_test.go b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs_test.go index 78011e288b962..8b76b266b1c9e 100644 --- a/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs_test.go +++ b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs_test.go @@ -1,7 +1,6 @@ package linux_sysctl_fs import ( - "io/ioutil" "os" "testing" @@ -10,16 +9,16 @@ import ( ) func TestSysctlFSGather(t *testing.T) { - td, err := ioutil.TempDir("", "") + td, err := os.MkdirTemp("", "") require.NoError(t, err) defer os.RemoveAll(td) - require.NoError(t, ioutil.WriteFile(td+"/aio-nr", []byte("100\n"), 0644)) - require.NoError(t, 
ioutil.WriteFile(td+"/aio-max-nr", []byte("101\n"), 0644)) - require.NoError(t, ioutil.WriteFile(td+"/super-nr", []byte("102\n"), 0644)) - require.NoError(t, ioutil.WriteFile(td+"/super-max", []byte("103\n"), 0644)) - require.NoError(t, ioutil.WriteFile(td+"/file-nr", []byte("104\t0\t106\n"), 0644)) - require.NoError(t, ioutil.WriteFile(td+"/inode-state", []byte("107\t108\t109\t0\t0\t0\t0\n"), 0644)) + require.NoError(t, os.WriteFile(td+"/aio-nr", []byte("100\n"), 0644)) + require.NoError(t, os.WriteFile(td+"/aio-max-nr", []byte("101\n"), 0644)) + require.NoError(t, os.WriteFile(td+"/super-nr", []byte("102\n"), 0644)) + require.NoError(t, os.WriteFile(td+"/super-max", []byte("103\n"), 0644)) + require.NoError(t, os.WriteFile(td+"/file-nr", []byte("104\t0\t106\n"), 0644)) + require.NoError(t, os.WriteFile(td+"/inode-state", []byte("107\t108\t109\t0\t0\t0\t0\n"), 0644)) sfs := &SysctlFS{ path: td, diff --git a/plugins/inputs/logparser/logparser_test.go b/plugins/inputs/logparser/logparser_test.go index 9bf4f125ae4f6..3100c615cd4e4 100644 --- a/plugins/inputs/logparser/logparser_test.go +++ b/plugins/inputs/logparser/logparser_test.go @@ -1,7 +1,6 @@ package logparser import ( - "io/ioutil" "os" "path/filepath" "testing" @@ -111,7 +110,7 @@ func TestGrokParseLogFiles(t *testing.T) { } func TestGrokParseLogFilesAppearLater(t *testing.T) { - emptydir, err := ioutil.TempDir("", "TestGrokParseLogFilesAppearLater") + emptydir, err := os.MkdirTemp("", "TestGrokParseLogFilesAppearLater") defer os.RemoveAll(emptydir) assert.NoError(t, err) @@ -131,10 +130,10 @@ func TestGrokParseLogFilesAppearLater(t *testing.T) { assert.Equal(t, acc.NFields(), 0) - input, err := ioutil.ReadFile(filepath.Join(testdataDir, "test_a.log")) + input, err := os.ReadFile(filepath.Join(testdataDir, "test_a.log")) assert.NoError(t, err) - err = ioutil.WriteFile(filepath.Join(emptydir, "test_a.log"), input, 0644) + err = os.WriteFile(filepath.Join(emptydir, "test_a.log"), input, 0644) assert.NoError(t, 
err) assert.NoError(t, acc.GatherError(logparser.Gather)) diff --git a/plugins/inputs/logstash/logstash.go b/plugins/inputs/logstash/logstash.go index 10a3e7b6b8dd0..6fcaadabcd244 100644 --- a/plugins/inputs/logstash/logstash.go +++ b/plugins/inputs/logstash/logstash.go @@ -4,7 +4,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strings" @@ -206,7 +205,7 @@ func (logstash *Logstash) gatherJSONData(url string, value interface{}) error { defer response.Body.Close() if response.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. - body, _ := ioutil.ReadAll(io.LimitReader(response.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(response.Body, 200)) return fmt.Errorf("%s returned HTTP status %s: %q", url, response.Status, body) } diff --git a/plugins/inputs/lustre2/lustre2.go b/plugins/inputs/lustre2/lustre2.go index 00aa288b316a8..abd5ce87c6bbb 100644 --- a/plugins/inputs/lustre2/lustre2.go +++ b/plugins/inputs/lustre2/lustre2.go @@ -8,7 +8,7 @@ package lustre2 import ( - "io/ioutil" + "os" "path/filepath" "regexp" "strconv" @@ -374,7 +374,7 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping) e name := path[len(path)-2] //lines, err := internal.ReadLines(file) - wholeFile, err := ioutil.ReadFile(file) + wholeFile, err := os.ReadFile(file) if err != nil { return err } diff --git a/plugins/inputs/lustre2/lustre2_test.go b/plugins/inputs/lustre2/lustre2_test.go index 52c7e87f08fc6..7fd3fd91f469e 100644 --- a/plugins/inputs/lustre2/lustre2_test.go +++ b/plugins/inputs/lustre2/lustre2_test.go @@ -4,7 +4,6 @@ package lustre2 import ( - "io/ioutil" "os" "testing" @@ -149,13 +148,13 @@ func TestLustre2GeneratesMetrics(t *testing.T) { err = os.MkdirAll(obddir+"/"+ostName, 0755) require.NoError(t, err) - err = ioutil.WriteFile(mdtdir+"/"+ostName+"/md_stats", []byte(mdtProcContents), 0644) + err = os.WriteFile(mdtdir+"/"+ostName+"/md_stats", 
[]byte(mdtProcContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(osddir+"/"+ostName+"/stats", []byte(osdldiskfsProcContents), 0644) + err = os.WriteFile(osddir+"/"+ostName+"/stats", []byte(osdldiskfsProcContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(obddir+"/"+ostName+"/stats", []byte(obdfilterProcContents), 0644) + err = os.WriteFile(obddir+"/"+ostName+"/stats", []byte(obdfilterProcContents), 0644) require.NoError(t, err) // Begin by testing standard Lustre stats @@ -218,10 +217,10 @@ func TestLustre2GeneratesJobstatsMetrics(t *testing.T) { err = os.MkdirAll(obddir+"/"+ostName, 0755) require.NoError(t, err) - err = ioutil.WriteFile(mdtdir+"/"+ostName+"/job_stats", []byte(mdtJobStatsContents), 0644) + err = os.WriteFile(mdtdir+"/"+ostName+"/job_stats", []byte(mdtJobStatsContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(obddir+"/"+ostName+"/job_stats", []byte(obdfilterJobStatsContents), 0644) + err = os.WriteFile(obddir+"/"+ostName+"/job_stats", []byte(obdfilterJobStatsContents), 0644) require.NoError(t, err) // Test Lustre Jobstats diff --git a/plugins/inputs/mailchimp/chimp_api.go b/plugins/inputs/mailchimp/chimp_api.go index 259e64a0e3104..2f6cecdb9e0da 100644 --- a/plugins/inputs/mailchimp/chimp_api.go +++ b/plugins/inputs/mailchimp/chimp_api.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "log" "net/http" "net/url" @@ -148,11 +147,11 @@ func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) { if resp.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. 
- body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(resp.Body, 200)) return nil, fmt.Errorf("%s returned HTTP status %s: %q", api.url.String(), resp.Status, body) } - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } diff --git a/plugins/inputs/mdstat/mdstat.go b/plugins/inputs/mdstat/mdstat.go index 81e3f36e7c767..3f6fee7d086ca 100644 --- a/plugins/inputs/mdstat/mdstat.go +++ b/plugins/inputs/mdstat/mdstat.go @@ -20,7 +20,6 @@ package mdstat import ( "fmt" - "io/ioutil" "os" "regexp" "sort" @@ -291,7 +290,7 @@ func (k *MdstatConf) getProcMdstat() ([]byte, error) { return nil, err } - data, err := ioutil.ReadFile(mdStatFile) + data, err := os.ReadFile(mdStatFile) if err != nil { return nil, err } diff --git a/plugins/inputs/mdstat/mdstat_test.go b/plugins/inputs/mdstat/mdstat_test.go index fe6041abec353..070b7ddd234f5 100644 --- a/plugins/inputs/mdstat/mdstat_test.go +++ b/plugins/inputs/mdstat/mdstat_test.go @@ -4,7 +4,6 @@ package mdstat import ( - "io/ioutil" "os" "testing" @@ -134,7 +133,7 @@ unused devices: ` func makeFakeMDStatFile(content []byte) (filename string) { - fileobj, err := ioutil.TempFile("", "mdstat") + fileobj, err := os.CreateTemp("", "mdstat") if err != nil { panic(err) } diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index acb79ce5724e5..68203c9d480cb 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -3,7 +3,7 @@ package mesos import ( "encoding/json" "errors" - "io/ioutil" + "io" "log" "net" "net/http" @@ -558,7 +558,7 @@ func (m *Mesos) gatherMainMetrics(u *url.URL, role Role, acc telegraf.Accumulato return err } - data, err := ioutil.ReadAll(resp.Body) + data, err := io.ReadAll(resp.Body) // Ignore the returned error to not shadow the initial one //nolint:errcheck,revive resp.Body.Close() diff --git a/plugins/inputs/multifile/multifile.go 
b/plugins/inputs/multifile/multifile.go index 838b1dd764d2f..65c2ac4e4b783 100644 --- a/plugins/inputs/multifile/multifile.go +++ b/plugins/inputs/multifile/multifile.go @@ -3,8 +3,8 @@ package multifile import ( "bytes" "fmt" - "io/ioutil" "math" + "os" "path" "strconv" "time" @@ -84,7 +84,7 @@ func (m *MultiFile) Gather(acc telegraf.Accumulator) error { tags := make(map[string]string) for _, file := range m.Files { - fileContents, err := ioutil.ReadFile(file.Name) + fileContents, err := os.ReadFile(file.Name) if err != nil { if m.FailEarly { diff --git a/plugins/inputs/nats/nats.go b/plugins/inputs/nats/nats.go index c2adab29b324d..7144355096b4e 100644 --- a/plugins/inputs/nats/nats.go +++ b/plugins/inputs/nats/nats.go @@ -5,7 +5,7 @@ package nats import ( "encoding/json" - "io/ioutil" + "io" "net/http" "net/url" "path" @@ -56,7 +56,7 @@ func (n *Nats) Gather(acc telegraf.Accumulator) error { } defer resp.Body.Close() - bytes, err := ioutil.ReadAll(resp.Body) + bytes, err := io.ReadAll(resp.Body) if err != nil { return err } diff --git a/plugins/inputs/neptune_apex/neptune_apex.go b/plugins/inputs/neptune_apex/neptune_apex.go index dad4c8e5857f6..c2bb05384d7c8 100644 --- a/plugins/inputs/neptune_apex/neptune_apex.go +++ b/plugins/inputs/neptune_apex/neptune_apex.go @@ -5,7 +5,7 @@ package neptuneapex import ( "encoding/xml" "fmt" - "io/ioutil" + "io" "math" "net/http" "strconv" @@ -276,7 +276,7 @@ func (n *NeptuneApex) sendRequest(server string) ([]byte, error) { url, resp.StatusCode, http.StatusText(resp.StatusCode), http.StatusOK, http.StatusText(http.StatusOK)) } - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, fmt.Errorf("unable to read output from %q: %v", url, err) } diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go index 7e1e753c5ff76..5cd7e76aec439 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go +++ 
b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go @@ -4,7 +4,7 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -70,7 +70,7 @@ func (n *NginxPlusAPI) gatherURL(addr *url.URL, path string) ([]byte, error) { contentType := strings.Split(resp.Header.Get("Content-Type"), ";")[0] switch contentType { case "application/json": - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } diff --git a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go index fb40643409056..42e0cab62d53e 100644 --- a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go +++ b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go @@ -4,7 +4,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strconv" @@ -153,7 +152,7 @@ func (check *NginxUpstreamCheck) gatherJSONData(url string, value interface{}) e defer response.Body.Close() if response.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. 
- body, _ := ioutil.ReadAll(io.LimitReader(response.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(response.Body, 200)) return fmt.Errorf("%s returned HTTP status %s: %q", url, response.Status, body) } diff --git a/plugins/inputs/nsq/nsq.go b/plugins/inputs/nsq/nsq.go index 681c2f6e7f460..58f60192b96d0 100644 --- a/plugins/inputs/nsq/nsq.go +++ b/plugins/inputs/nsq/nsq.go @@ -25,7 +25,7 @@ package nsq import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "strconv" @@ -131,7 +131,7 @@ func (n *NSQ) gatherEndpoint(e string, acc telegraf.Accumulator) error { return fmt.Errorf("%s returned HTTP status %s", u.String(), r.Status) } - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) if err != nil { return fmt.Errorf(`error reading body: %s`, err) } diff --git a/plugins/inputs/nstat/nstat.go b/plugins/inputs/nstat/nstat.go index 5bc2bc85a3136..4408b8f728579 100644 --- a/plugins/inputs/nstat/nstat.go +++ b/plugins/inputs/nstat/nstat.go @@ -2,7 +2,6 @@ package nstat import ( "bytes" - "io/ioutil" "os" "strconv" @@ -62,7 +61,7 @@ func (ns *Nstat) Gather(acc telegraf.Accumulator) error { // load paths, get from env if config values are empty ns.loadPaths() - netstat, err := ioutil.ReadFile(ns.ProcNetNetstat) + netstat, err := os.ReadFile(ns.ProcNetNetstat) if err != nil { return err } @@ -71,14 +70,14 @@ func (ns *Nstat) Gather(acc telegraf.Accumulator) error { ns.gatherNetstat(netstat, acc) // collect SNMP data - snmp, err := ioutil.ReadFile(ns.ProcNetSNMP) + snmp, err := os.ReadFile(ns.ProcNetSNMP) if err != nil { return err } ns.gatherSNMP(snmp, acc) // collect SNMP6 data, if SNMP6 directory exists (IPv6 enabled) - snmp6, err := ioutil.ReadFile(ns.ProcNetSNMP6) + snmp6, err := os.ReadFile(ns.ProcNetSNMP6) if err == nil { ns.gatherSNMP6(snmp6, acc) } else if !os.IsNotExist(err) { diff --git a/plugins/inputs/nvidia_smi/nvidia_smi_test.go b/plugins/inputs/nvidia_smi/nvidia_smi_test.go index ea5887ae10a5d..3c0b14d6e4559 100644 --- 
a/plugins/inputs/nvidia_smi/nvidia_smi_test.go +++ b/plugins/inputs/nvidia_smi/nvidia_smi_test.go @@ -1,7 +1,7 @@ package nvidia_smi import ( - "io/ioutil" + "os" "path/filepath" "testing" "time" @@ -139,7 +139,7 @@ func TestGatherValidXML(t *testing.T) { t.Run(tt.name, func(t *testing.T) { var acc testutil.Accumulator - octets, err := ioutil.ReadFile(filepath.Join("testdata", tt.filename)) + octets, err := os.ReadFile(filepath.Join("testdata", tt.filename)) require.NoError(t, err) err = gatherNvidiaSMI(octets, &acc) diff --git a/plugins/inputs/opcua/opcua_util.go b/plugins/inputs/opcua/opcua_util.go index bb7ca56200954..e1304fa304fc6 100644 --- a/plugins/inputs/opcua/opcua_util.go +++ b/plugins/inputs/opcua/opcua_util.go @@ -9,7 +9,6 @@ import ( "crypto/x509/pkix" "encoding/pem" "fmt" - "io/ioutil" "log" "math/big" "net" @@ -27,7 +26,7 @@ import ( // SELF SIGNED CERT FUNCTIONS func newTempDir() (string, error) { - dir, err := ioutil.TempDir("", "ssc") + dir, err := os.MkdirTemp("", "ssc") return dir, err } diff --git a/plugins/inputs/passenger/passenger_test.go b/plugins/inputs/passenger/passenger_test.go index dbee336ba1040..ecbeeb532fd1e 100644 --- a/plugins/inputs/passenger/passenger_test.go +++ b/plugins/inputs/passenger/passenger_test.go @@ -2,7 +2,6 @@ package passenger import ( "fmt" - "io/ioutil" "os" "path/filepath" "runtime" @@ -28,7 +27,7 @@ func fakePassengerStatus(stat string) (string, error) { } tempFilePath := filepath.Join(os.TempDir(), "passenger-status"+fileExtension) - if err := ioutil.WriteFile(tempFilePath, []byte(content), 0700); err != nil { + if err := os.WriteFile(tempFilePath, []byte(content), 0700); err != nil { return "", err } diff --git a/plugins/inputs/phpfpm/child.go b/plugins/inputs/phpfpm/child.go index 9ac7e60715856..b6a6f956d3bf0 100644 --- a/plugins/inputs/phpfpm/child.go +++ b/plugins/inputs/phpfpm/child.go @@ -10,7 +10,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net" "net/http" "net/http/cgi" @@ -161,7 +160,7 @@ func (c 
*child) serve() { var errCloseConn = errors.New("fcgi: connection should be closed") -var emptyBody = ioutil.NopCloser(strings.NewReader("")) +var emptyBody = io.NopCloser(strings.NewReader("")) // ErrRequestAborted is returned by Read when a handler attempts to read the // body of a request that has been aborted by the web server. @@ -295,7 +294,7 @@ func (c *child) serveRequest(req *request, body io.ReadCloser) { // can properly cut off the client sending all the data. // For now just bound it a little and //nolint:errcheck,revive - io.CopyN(ioutil.Discard, body, 100<<20) + io.CopyN(io.Discard, body, 100<<20) //nolint:errcheck,revive body.Close() diff --git a/plugins/inputs/phpfpm/fcgi_test.go b/plugins/inputs/phpfpm/fcgi_test.go index a7234225806cc..7211c0c3971e1 100644 --- a/plugins/inputs/phpfpm/fcgi_test.go +++ b/plugins/inputs/phpfpm/fcgi_test.go @@ -8,7 +8,6 @@ import ( "bytes" "errors" "io" - "io/ioutil" "net/http" "testing" ) @@ -242,7 +241,7 @@ func TestChildServeCleansUp(t *testing.T) { r *http.Request, ) { // block on reading body of request - _, err := io.Copy(ioutil.Discard, r.Body) + _, err := io.Copy(io.Discard, r.Body) if err != tt.err { t.Errorf("Expected %#v, got %#v", tt.err, err) } @@ -274,7 +273,7 @@ func TestMalformedParams(_ *testing.T) { // end of params 1, 4, 0, 1, 0, 0, 0, 0, } - rw := rwNopCloser{bytes.NewReader(input), ioutil.Discard} + rw := rwNopCloser{bytes.NewReader(input), io.Discard} c := newChild(rw, http.DefaultServeMux) c.serve() } diff --git a/plugins/inputs/postfix/postfix_test.go b/plugins/inputs/postfix/postfix_test.go index 782a0c78c95b9..6ab6556a0cf07 100644 --- a/plugins/inputs/postfix/postfix_test.go +++ b/plugins/inputs/postfix/postfix_test.go @@ -4,7 +4,6 @@ package postfix import ( - "io/ioutil" "os" "path/filepath" "testing" @@ -15,7 +14,7 @@ import ( ) func TestGather(t *testing.T) { - td, err := ioutil.TempDir("", "") + td, err := os.MkdirTemp("", "") require.NoError(t, err) defer os.RemoveAll(td) @@ -23,12 
+22,12 @@ func TestGather(t *testing.T) { require.NoError(t, os.MkdirAll(filepath.FromSlash(td+"/"+q), 0755)) } - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/active/01"), []byte("abc"), 0644)) - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/active/02"), []byte("defg"), 0644)) - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/hold/01"), []byte("abc"), 0644)) - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/incoming/01"), []byte("abcd"), 0644)) - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/deferred/0/0/01"), []byte("abc"), 0644)) - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/deferred/F/F/F1"), []byte("abc"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/active/01"), []byte("abc"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/active/02"), []byte("defg"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/hold/01"), []byte("abc"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/incoming/01"), []byte("abcd"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/deferred/0/0/01"), []byte("abc"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/deferred/F/F/F1"), []byte("abc"), 0644)) p := Postfix{ QueueDirectory: td, diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go index 8311064b1f060..176827a4b1dc7 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -3,7 +3,7 @@ package postgresql_extensible import ( "bytes" "fmt" - "io/ioutil" + "io" "os" "strings" "time" @@ -147,7 +147,7 @@ func ReadQueryFromFile(filePath string) (string, error) { } defer file.Close() - query, err := ioutil.ReadAll(file) + query, err := io.ReadAll(file) if err != nil { return "", err } diff --git 
a/plugins/inputs/processes/processes_notwindows.go b/plugins/inputs/processes/processes_notwindows.go index 3c685cf1ebf7f..070dce65fe2a0 100644 --- a/plugins/inputs/processes/processes_notwindows.go +++ b/plugins/inputs/processes/processes_notwindows.go @@ -6,7 +6,6 @@ package processes import ( "bytes" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -192,7 +191,7 @@ func (p *Processes) gatherFromProc(fields map[string]interface{}) error { } func readProcFile(filename string) ([]byte, error) { - data, err := ioutil.ReadFile(filename) + data, err := os.ReadFile(filename) if err != nil { if os.IsNotExist(err) { return nil, nil diff --git a/plugins/inputs/procstat/native_finder.go b/plugins/inputs/procstat/native_finder.go index d5d8b8b36fe70..05cf4a72735f0 100644 --- a/plugins/inputs/procstat/native_finder.go +++ b/plugins/inputs/procstat/native_finder.go @@ -2,7 +2,7 @@ package procstat import ( "fmt" - "io/ioutil" + "os" "regexp" "strconv" "strings" @@ -43,7 +43,7 @@ func (pg *NativeFinder) UID(user string) ([]PID, error) { //PidFile returns the pid from the pid file given. func (pg *NativeFinder) PidFile(path string) ([]PID, error) { var pids []PID - pidString, err := ioutil.ReadFile(path) + pidString, err := os.ReadFile(path) if err != nil { return pids, fmt.Errorf("Failed to read pidfile '%s'. Error: '%s'", path, err) diff --git a/plugins/inputs/procstat/pgrep.go b/plugins/inputs/procstat/pgrep.go index 85e8d80f83cfe..34c44e0b2fefb 100644 --- a/plugins/inputs/procstat/pgrep.go +++ b/plugins/inputs/procstat/pgrep.go @@ -2,7 +2,7 @@ package procstat import ( "fmt" - "io/ioutil" + "os" "os/exec" "strconv" "strings" @@ -25,7 +25,7 @@ func NewPgrep() (PIDFinder, error) { func (pg *Pgrep) PidFile(path string) ([]PID, error) { var pids []PID - pidString, err := ioutil.ReadFile(path) + pidString, err := os.ReadFile(path) if err != nil { return pids, fmt.Errorf("Failed to read pidfile '%s'. 
Error: '%s'", path, err) diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index b838df651f636..ce29a08460cca 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -3,7 +3,6 @@ package procstat import ( "bytes" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -516,7 +515,7 @@ func (p *Procstat) singleCgroupPIDs(path string) ([]PID, error) { return nil, fmt.Errorf("not a directory %s", path) } procsPath := filepath.Join(path, "cgroup.procs") - out, err := ioutil.ReadFile(procsPath) + out, err := os.ReadFile(procsPath) if err != nil { return nil, err } diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go index 2d8687e75013b..bc586fca4fa42 100644 --- a/plugins/inputs/procstat/procstat_test.go +++ b/plugins/inputs/procstat/procstat_test.go @@ -2,7 +2,6 @@ package procstat import ( "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -385,10 +384,10 @@ func TestGather_cgroupPIDs(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("no cgroups in windows") } - td, err := ioutil.TempDir("", "") + td, err := os.MkdirTemp("", "") require.NoError(t, err) defer os.RemoveAll(td) - err = ioutil.WriteFile(filepath.Join(td, "cgroup.procs"), []byte("1234\n5678\n"), 0644) + err = os.WriteFile(filepath.Join(td, "cgroup.procs"), []byte("1234\n5678\n"), 0644) require.NoError(t, err) p := Procstat{ diff --git a/plugins/inputs/prometheus/kubernetes.go b/plugins/inputs/prometheus/kubernetes.go index 0e658003a7122..a57e771bfc483 100644 --- a/plugins/inputs/prometheus/kubernetes.go +++ b/plugins/inputs/prometheus/kubernetes.go @@ -5,11 +5,11 @@ import ( "crypto/tls" "encoding/json" "fmt" - "io/ioutil" "log" "net" "net/http" "net/url" + "os" "os/user" "path/filepath" "time" @@ -41,7 +41,7 @@ const cAdvisorPodListDefaultInterval = 60 // loadClient parses a kubeconfig from a file and returns a Kubernetes // client. 
It does not support extensions or client auth providers. func loadClient(kubeconfigPath string) (*kubernetes.Clientset, error) { - data, err := ioutil.ReadFile(kubeconfigPath) + data, err := os.ReadFile(kubeconfigPath) if err != nil { return nil, fmt.Errorf("failed reading '%s': %v", kubeconfigPath, err) } diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index adeb452253a37..136e8ae0f6d9d 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -4,7 +4,7 @@ import ( "context" "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -382,7 +382,7 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error p.addHeaders(req) if p.BearerToken != "" { - token, err := ioutil.ReadFile(p.BearerToken) + token, err := os.ReadFile(p.BearerToken) if err != nil { return err } @@ -408,7 +408,7 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error return fmt.Errorf("%s returned HTTP status %s", u.URL, resp.Status) } - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return fmt.Errorf("error reading body: %s", err) } diff --git a/plugins/inputs/proxmox/proxmox.go b/plugins/inputs/proxmox/proxmox.go index ec34a7b2f5a36..efd7fae7d5d5f 100644 --- a/plugins/inputs/proxmox/proxmox.go +++ b/plugins/inputs/proxmox/proxmox.go @@ -3,7 +3,7 @@ package proxmox import ( "encoding/json" "errors" - "io/ioutil" + "io" "net/http" "net/url" "os" @@ -115,7 +115,7 @@ func performRequest(px *Proxmox, apiURL string, method string, data url.Values) } defer resp.Body.Close() - responseBody, err := ioutil.ReadAll(resp.Body) + responseBody, err := io.ReadAll(resp.Body) if err != nil { return nil, err } diff --git a/plugins/inputs/puppetagent/puppetagent.go b/plugins/inputs/puppetagent/puppetagent.go index 36c284ff57cb6..f31e03d327817 100644 --- a/plugins/inputs/puppetagent/puppetagent.go +++ 
b/plugins/inputs/puppetagent/puppetagent.go @@ -2,12 +2,12 @@ package puppetagent import ( "fmt" - "gopkg.in/yaml.v2" - "io/ioutil" "os" "reflect" "strings" + "gopkg.in/yaml.v2" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -102,7 +102,7 @@ func (pa *PuppetAgent) Gather(acc telegraf.Accumulator) error { return fmt.Errorf("%s", err) } - fh, err := ioutil.ReadFile(pa.Location) + fh, err := os.ReadFile(pa.Location) if err != nil { return fmt.Errorf("%s", err) } diff --git a/plugins/inputs/rabbitmq/rabbitmq.go b/plugins/inputs/rabbitmq/rabbitmq.go index 13be5f63b1619..158b8d5ed6b21 100644 --- a/plugins/inputs/rabbitmq/rabbitmq.go +++ b/plugins/inputs/rabbitmq/rabbitmq.go @@ -3,7 +3,7 @@ package rabbitmq import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "strconv" "sync" @@ -431,7 +431,7 @@ func (r *RabbitMQ) requestEndpoint(u string) ([]byte, error) { return nil, fmt.Errorf("getting %q failed: %v %v", u, resp.StatusCode, http.StatusText(resp.StatusCode)) } - return ioutil.ReadAll(resp.Body) + return io.ReadAll(resp.Body) } func (r *RabbitMQ) requestJSON(u string, target interface{}) error { diff --git a/plugins/inputs/rabbitmq/rabbitmq_test.go b/plugins/inputs/rabbitmq/rabbitmq_test.go index 830819b0528e4..e867b1e2dcb61 100644 --- a/plugins/inputs/rabbitmq/rabbitmq_test.go +++ b/plugins/inputs/rabbitmq/rabbitmq_test.go @@ -2,9 +2,9 @@ package rabbitmq import ( "fmt" - "io/ioutil" "net/http" "net/http/httptest" + "os" "time" "testing" @@ -37,7 +37,7 @@ func TestRabbitMQGeneratesMetricsSet1(t *testing.T) { return } - data, err := ioutil.ReadFile(jsonFilePath) + data, err := os.ReadFile(jsonFilePath) require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) _, err = w.Write(data) @@ -247,7 +247,7 @@ func TestRabbitMQGeneratesMetricsSet2(t *testing.T) { return } - data, err := ioutil.ReadFile(jsonFilePath) + data, err := os.ReadFile(jsonFilePath) require.NoErrorf(t, err, "could not read from data file 
%s", jsonFilePath) _, err = w.Write(data) diff --git a/plugins/inputs/ravendb/ravendb_test.go b/plugins/inputs/ravendb/ravendb_test.go index 42eaea3fb3e3b..3da1d0190a055 100644 --- a/plugins/inputs/ravendb/ravendb_test.go +++ b/plugins/inputs/ravendb/ravendb_test.go @@ -1,9 +1,9 @@ package ravendb import ( - "io/ioutil" "net/http" "net/http/httptest" + "os" "testing" "time" @@ -30,7 +30,7 @@ func TestRavenDBGeneratesMetricsFull(t *testing.T) { require.Failf(t, "Cannot handle request for uri %s", r.URL.Path) } - data, err := ioutil.ReadFile(jsonFilePath) + data, err := os.ReadFile(jsonFilePath) require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) _, err = w.Write(data) @@ -225,7 +225,7 @@ func TestRavenDBGeneratesMetricsMin(t *testing.T) { require.Failf(t, "Cannot handle request for uri %s", r.URL.Path) } - data, err := ioutil.ReadFile(jsonFilePath) + data, err := os.ReadFile(jsonFilePath) require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) _, err = w.Write(data) diff --git a/plugins/inputs/redfish/redfish.go b/plugins/inputs/redfish/redfish.go index 4d9e70a57a9bd..dcf26b192c651 100644 --- a/plugins/inputs/redfish/redfish.go +++ b/plugins/inputs/redfish/redfish.go @@ -3,7 +3,7 @@ package redfish import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -199,7 +199,7 @@ func (r *Redfish) getData(url string, payload interface{}) error { r.Address) } - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return err } diff --git a/plugins/inputs/salesforce/salesforce.go b/plugins/inputs/salesforce/salesforce.go index f1ecff8d61a83..f7c321d7ae978 100644 --- a/plugins/inputs/salesforce/salesforce.go +++ b/plugins/inputs/salesforce/salesforce.go @@ -6,7 +6,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strings" @@ -203,11 +202,11 @@ func (s *Salesforce) login() error { defer resp.Body.Close() if resp.StatusCode != http.StatusOK { // ignore the 
err here; LimitReader returns io.EOF and we're not interested in read errors. - body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(resp.Body, 200)) return fmt.Errorf("%s returned HTTP status %s: %q", loginEndpoint, resp.Status, body) } - respBody, err := ioutil.ReadAll(resp.Body) + respBody, err := io.ReadAll(resp.Body) if err != nil { return err } diff --git a/plugins/inputs/snmp_legacy/snmp_legacy.go b/plugins/inputs/snmp_legacy/snmp_legacy.go index d85afca8e4e7f..604a2205c0d2c 100644 --- a/plugins/inputs/snmp_legacy/snmp_legacy.go +++ b/plugins/inputs/snmp_legacy/snmp_legacy.go @@ -1,9 +1,9 @@ package snmp_legacy import ( - "io/ioutil" "log" "net" + "os" "strconv" "strings" "time" @@ -296,7 +296,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { subnodes: make(map[string]Node), } - data, err := ioutil.ReadFile(s.SnmptranslateFile) + data, err := os.ReadFile(s.SnmptranslateFile) if err != nil { s.Log.Errorf("Reading SNMPtranslate file error: %s", err.Error()) return err diff --git a/plugins/inputs/socket_listener/socket_listener_test.go b/plugins/inputs/socket_listener/socket_listener_test.go index c33e59f7129b6..a3ccacae1ceb2 100644 --- a/plugins/inputs/socket_listener/socket_listener_test.go +++ b/plugins/inputs/socket_listener/socket_listener_test.go @@ -4,7 +4,6 @@ import ( "bytes" "crypto/tls" "io" - "io/ioutil" "log" "net" "os" @@ -69,7 +68,7 @@ func TestSocketListener_tcp_tls(t *testing.T) { } func TestSocketListener_unix_tls(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "sl.TestSocketListener_unix_tls.sock") @@ -133,7 +132,7 @@ func TestSocketListener_udp(t *testing.T) { } func TestSocketListener_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer 
os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "sl.TestSocketListener_unix.sock") @@ -163,7 +162,7 @@ func TestSocketListener_unixgram(t *testing.T) { t.Skip("Skipping on Windows, as unixgram sockets are not supported") } - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "sl.TestSocketListener_unixgram.sock") diff --git a/plugins/inputs/sql/sql.go b/plugins/inputs/sql/sql.go index c6c4658d83959..87227663bb4d0 100644 --- a/plugins/inputs/sql/sql.go +++ b/plugins/inputs/sql/sql.go @@ -5,7 +5,7 @@ import ( dbsql "database/sql" "errors" "fmt" - "io/ioutil" + "os" "sort" "strings" "sync" @@ -326,7 +326,7 @@ func (s *SQL) Init() error { // In case we got a script, we should read the query now. if q.Script != "" { - query, err := ioutil.ReadFile(q.Script) + query, err := os.ReadFile(q.Script) if err != nil { return fmt.Errorf("reading script %q failed: %v", q.Script, err) } diff --git a/plugins/inputs/suricata/suricata_test.go b/plugins/inputs/suricata/suricata_test.go index 9b620efc3e216..f3fc5f14eb394 100644 --- a/plugins/inputs/suricata/suricata_test.go +++ b/plugins/inputs/suricata/suricata_test.go @@ -2,7 +2,6 @@ package suricata import ( "fmt" - "io/ioutil" "log" "math/rand" "net" @@ -21,7 +20,7 @@ var ex2 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats"," var ex3 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats":{"threads": { "W#05-wlp4s0": { "capture":{"kernel_packets":905344474,"kernel_drops":78355440}}}}}` func TestSuricataLarge(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -38,7 +37,7 @@ func TestSuricataLarge(t *testing.T) { require.NoError(t, s.Start(&acc)) defer s.Stop() - data, err := 
ioutil.ReadFile("testdata/test1.json") + data, err := os.ReadFile("testdata/test1.json") require.NoError(t, err) c, err := net.Dial("unix", tmpfn) @@ -49,7 +48,7 @@ func TestSuricataLarge(t *testing.T) { require.NoError(t, err) //test suricata alerts - data2, err := ioutil.ReadFile("testdata/test2.json") + data2, err := os.ReadFile("testdata/test2.json") require.NoError(t, err) _, err = c.Write(data2) require.NoError(t, err) @@ -61,7 +60,7 @@ func TestSuricataLarge(t *testing.T) { } func TestSuricataAlerts(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -78,7 +77,7 @@ func TestSuricataAlerts(t *testing.T) { require.NoError(t, s.Start(&acc)) defer s.Stop() - data, err := ioutil.ReadFile("testdata/test3.json") + data, err := os.ReadFile("testdata/test3.json") require.NoError(t, err) c, err := net.Dial("unix", tmpfn) @@ -116,7 +115,7 @@ func TestSuricataAlerts(t *testing.T) { } func TestSuricata(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -162,7 +161,7 @@ func TestSuricata(t *testing.T) { } func TestThreadStats(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -212,7 +211,7 @@ func TestThreadStats(t *testing.T) { } func TestSuricataInvalid(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -254,7 +253,7 @@ func TestSuricataInvalidPath(t *testing.T) { } func TestSuricataTooLongLine(t *testing.T) { - dir, err := ioutil.TempDir("", "test") 
+ dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -282,7 +281,7 @@ func TestSuricataTooLongLine(t *testing.T) { } func TestSuricataEmptyJSON(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -309,7 +308,7 @@ func TestSuricataEmptyJSON(t *testing.T) { } func TestSuricataDisconnectSocket(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -345,7 +344,7 @@ func TestSuricataDisconnectSocket(t *testing.T) { } func TestSuricataStartStop(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -387,7 +386,7 @@ func TestSuricataParse(t *testing.T) { } for _, tc := range tests { - data, err := ioutil.ReadFile("testdata/" + tc.filename) + data, err := os.ReadFile("testdata/" + tc.filename) require.NoError(t, err) s := Suricata{ Delimiter: "_", diff --git a/plugins/inputs/synproxy/synproxy_test.go b/plugins/inputs/synproxy/synproxy_test.go index dd733253635b8..e8fbe62989055 100644 --- a/plugins/inputs/synproxy/synproxy_test.go +++ b/plugins/inputs/synproxy/synproxy_test.go @@ -4,7 +4,6 @@ package synproxy import ( - "io/ioutil" "os" "testing" @@ -156,7 +155,7 @@ func testSynproxyFileData(t *testing.T, fileData string, telegrafData map[string } func makeFakeSynproxyFile(content []byte) string { - tmpfile, err := ioutil.TempFile("", "synproxy_test") + tmpfile, err := os.CreateTemp("", "synproxy_test") if err != nil { panic(err) } diff --git a/plugins/inputs/syslog/nontransparent_test.go 
b/plugins/inputs/syslog/nontransparent_test.go index 4d29daaf53915..7782ad968a3b1 100644 --- a/plugins/inputs/syslog/nontransparent_test.go +++ b/plugins/inputs/syslog/nontransparent_test.go @@ -2,7 +2,6 @@ package syslog import ( "crypto/tls" - "io/ioutil" "net" "os" "path/filepath" @@ -270,7 +269,7 @@ func TestNonTransparentStrictWithZeroKeepAlive_tcp_tls(t *testing.T) { } func TestNonTransparentStrict_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestStrict_unix.sock") @@ -278,7 +277,7 @@ func TestNonTransparentStrict_unix(t *testing.T) { } func TestNonTransparentBestEffort_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix.sock") @@ -286,7 +285,7 @@ func TestNonTransparentBestEffort_unix(t *testing.T) { } func TestNonTransparentStrict_unix_tls(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestStrict_unix_tls.sock") @@ -294,7 +293,7 @@ func TestNonTransparentStrict_unix_tls(t *testing.T) { } func TestNonTransparentBestEffort_unix_tls(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix_tls.sock") diff --git a/plugins/inputs/syslog/octetcounting_test.go b/plugins/inputs/syslog/octetcounting_test.go index 53fee69d112a5..1c0cc024507e2 100644 --- a/plugins/inputs/syslog/octetcounting_test.go +++ b/plugins/inputs/syslog/octetcounting_test.go @@ -3,7 +3,6 @@ package syslog import ( "crypto/tls" "fmt" - "io/ioutil" "net" "os" 
"path/filepath" @@ -470,7 +469,7 @@ func TestOctetCountingStrictWithZeroKeepAlive_tcp_tls(t *testing.T) { } func TestOctetCountingStrict_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestStrict_unix.sock") @@ -478,7 +477,7 @@ func TestOctetCountingStrict_unix(t *testing.T) { } func TestOctetCountingBestEffort_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix.sock") @@ -486,7 +485,7 @@ func TestOctetCountingBestEffort_unix(t *testing.T) { } func TestOctetCountingStrict_unix_tls(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestStrict_unix_tls.sock") @@ -494,7 +493,7 @@ func TestOctetCountingStrict_unix_tls(t *testing.T) { } func TestOctetCountingBestEffort_unix_tls(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix_tls.sock") diff --git a/plugins/inputs/syslog/rfc5426_test.go b/plugins/inputs/syslog/rfc5426_test.go index ab3fe2ceaf60f..5bcb847b36ec4 100644 --- a/plugins/inputs/syslog/rfc5426_test.go +++ b/plugins/inputs/syslog/rfc5426_test.go @@ -2,7 +2,6 @@ package syslog import ( "fmt" - "io/ioutil" "net" "os" "path/filepath" @@ -290,7 +289,7 @@ func TestBestEffort_unixgram(t *testing.T) { t.Skip("Skipping on Windows, as unixgram sockets are not supported") } - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := 
filepath.Join(tmpdir, "syslog.TestBestEffort_unixgram.sock") @@ -304,7 +303,7 @@ func TestStrict_unixgram(t *testing.T) { t.Skip("Skipping on Windows, as unixgram sockets are not supported") } - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestStrict_unixgram.sock") diff --git a/plugins/inputs/syslog/syslog_test.go b/plugins/inputs/syslog/syslog_test.go index ac0539d30e1af..00146fde9cd26 100644 --- a/plugins/inputs/syslog/syslog_test.go +++ b/plugins/inputs/syslog/syslog_test.go @@ -1,7 +1,6 @@ package syslog import ( - "io/ioutil" "os" "path/filepath" "runtime" @@ -46,7 +45,7 @@ func TestAddress(t *testing.T) { require.EqualError(t, err, "unknown protocol 'unsupported' in 'example.com:6514'") require.Error(t, err) - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") defer os.RemoveAll(tmpdir) require.NoError(t, err) sock := filepath.Join(tmpdir, "syslog.TestAddress.sock") diff --git a/plugins/inputs/tail/tail_test.go b/plugins/inputs/tail/tail_test.go index 16c38519a83b6..1098a10edbff5 100644 --- a/plugins/inputs/tail/tail_test.go +++ b/plugins/inputs/tail/tail_test.go @@ -2,7 +2,6 @@ package tail import ( "bytes" - "io/ioutil" "log" "os" "path/filepath" @@ -49,7 +48,7 @@ func NewTestTail() *Tail { } func TestTailBadLine(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) @@ -86,7 +85,7 @@ func TestTailBadLine(t *testing.T) { } func TestTailDosLineEndings(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) _, err = tmpfile.WriteString("cpu usage_idle=100\r\ncpu2 usage_idle=200\r\n") @@ -173,7 +172,7 @@ func TestGrokParseLogFilesWithMultiline(t *testing.T) { } func 
TestGrokParseLogFilesWithMultilineTimeout(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) @@ -286,7 +285,7 @@ func createGrokParser() (parsers.Parser, error) { // The csv parser should only parse the header line once per file. func TestCSVHeadersParsedOnce(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) @@ -345,7 +344,7 @@ cpu,42 // Ensure that the first line can produce multiple metrics (#6138) func TestMultipleMetricsOnFirstLine(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) @@ -542,7 +541,7 @@ func TestCharacterEncoding(t *testing.T) { } func TestTailEOF(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) _, err = tmpfile.WriteString("cpu usage_idle=100\r\n") diff --git a/plugins/inputs/twemproxy/twemproxy.go b/plugins/inputs/twemproxy/twemproxy.go index cda56943f1002..b4c4b52f85b6c 100644 --- a/plugins/inputs/twemproxy/twemproxy.go +++ b/plugins/inputs/twemproxy/twemproxy.go @@ -3,7 +3,7 @@ package twemproxy import ( "encoding/json" "errors" - "io/ioutil" + "io" "net" "time" @@ -37,7 +37,7 @@ func (t *Twemproxy) Gather(acc telegraf.Accumulator) error { if err != nil { return err } - body, err := ioutil.ReadAll(conn) + body, err := io.ReadAll(conn) if err != nil { return err } diff --git a/plugins/inputs/udp_listener/udp_listener_test.go b/plugins/inputs/udp_listener/udp_listener_test.go index 8bd8262c035b0..3e36838c6192a 100644 --- a/plugins/inputs/udp_listener/udp_listener_test.go +++ b/plugins/inputs/udp_listener/udp_listener_test.go @@ -104,7 +104,7 @@ package udp_listener // } // func TestRunParser(t *testing.T) { -// 
log.SetOutput(ioutil.Discard) +// log.SetOutput(io.Discard) // var testmsg = []byte("cpu_load_short,host=server01 value=12.0 1422568543702900257\n") // listener, in := newTestUDPListener() @@ -127,7 +127,7 @@ package udp_listener // } // func TestRunParserInvalidMsg(_ *testing.T) { -// log.SetOutput(ioutil.Discard) +// log.SetOutput(io.Discard) // var testmsg = []byte("cpu_load_short") // listener, in := newTestUDPListener() @@ -153,7 +153,7 @@ package udp_listener // } // func TestRunParserGraphiteMsg(t *testing.T) { -// log.SetOutput(ioutil.Discard) +// log.SetOutput(io.Discard) // var testmsg = []byte("cpu.load.graphite 12 1454780029") // listener, in := newTestUDPListener() @@ -174,7 +174,7 @@ package udp_listener // } // func TestRunParserJSONMsg(t *testing.T) { -// log.SetOutput(ioutil.Discard) +// log.SetOutput(io.Discard) // var testmsg = []byte("{\"a\": 5, \"b\": {\"c\": 6}}\n") // listener, in := newTestUDPListener() diff --git a/plugins/inputs/webhooks/filestack/filestack_webhooks.go b/plugins/inputs/webhooks/filestack/filestack_webhooks.go index 19f8c0251bbb7..44def8c6f5141 100644 --- a/plugins/inputs/webhooks/filestack/filestack_webhooks.go +++ b/plugins/inputs/webhooks/filestack/filestack_webhooks.go @@ -2,7 +2,7 @@ package filestack import ( "encoding/json" - "io/ioutil" + "io" "log" "net/http" "time" @@ -25,7 +25,7 @@ func (fs *FilestackWebhook) Register(router *mux.Router, acc telegraf.Accumulato func (fs *FilestackWebhook) eventHandler(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusBadRequest) return diff --git a/plugins/inputs/webhooks/github/github_webhooks.go b/plugins/inputs/webhooks/github/github_webhooks.go index 5febb80afb6bb..2d48cbef2e5f2 100644 --- a/plugins/inputs/webhooks/github/github_webhooks.go +++ b/plugins/inputs/webhooks/github/github_webhooks.go @@ -5,7 +5,7 @@ import ( "crypto/sha1" "encoding/hex" 
"encoding/json" - "io/ioutil" + "io" "log" "net/http" @@ -28,7 +28,7 @@ func (gh *GithubWebhook) Register(router *mux.Router, acc telegraf.Accumulator) func (gh *GithubWebhook) eventHandler(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() eventType := r.Header.Get("X-Github-Event") - data, err := ioutil.ReadAll(r.Body) + data, err := io.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusBadRequest) return diff --git a/plugins/inputs/webhooks/mandrill/mandrill_webhooks.go b/plugins/inputs/webhooks/mandrill/mandrill_webhooks.go index a7e219c53c905..67ba86908d1a1 100644 --- a/plugins/inputs/webhooks/mandrill/mandrill_webhooks.go +++ b/plugins/inputs/webhooks/mandrill/mandrill_webhooks.go @@ -2,7 +2,7 @@ package mandrill import ( "encoding/json" - "io/ioutil" + "io" "log" "net/http" "net/url" @@ -31,7 +31,7 @@ func (md *MandrillWebhook) returnOK(w http.ResponseWriter, _ *http.Request) { func (md *MandrillWebhook) eventHandler(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusBadRequest) return diff --git a/plugins/inputs/webhooks/rollbar/rollbar_webhooks.go b/plugins/inputs/webhooks/rollbar/rollbar_webhooks.go index 55ff7eb2f3594..d9c1323cdd608 100644 --- a/plugins/inputs/webhooks/rollbar/rollbar_webhooks.go +++ b/plugins/inputs/webhooks/rollbar/rollbar_webhooks.go @@ -3,7 +3,7 @@ package rollbar import ( "encoding/json" "errors" - "io/ioutil" + "io" "log" "net/http" "time" @@ -25,7 +25,7 @@ func (rb *RollbarWebhook) Register(router *mux.Router, acc telegraf.Accumulator) func (rb *RollbarWebhook) eventHandler(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() - data, err := ioutil.ReadAll(r.Body) + data, err := io.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusBadRequest) return diff --git a/plugins/inputs/wireless/wireless_linux.go b/plugins/inputs/wireless/wireless_linux.go index 
706f9700d12c9..29a0250d92b7f 100644 --- a/plugins/inputs/wireless/wireless_linux.go +++ b/plugins/inputs/wireless/wireless_linux.go @@ -5,7 +5,6 @@ package wireless import ( "bytes" - "io/ioutil" "log" "os" "path" @@ -47,7 +46,7 @@ func (w *Wireless) Gather(acc telegraf.Accumulator) error { w.loadPath() wirelessPath := path.Join(w.HostProc, "net", "wireless") - table, err := ioutil.ReadFile(wirelessPath) + table, err := os.ReadFile(wirelessPath) if err != nil { return err } diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go index b106f91b772f6..3486f2779eb2b 100644 --- a/plugins/inputs/x509_cert/x509_cert.go +++ b/plugins/inputs/x509_cert/x509_cert.go @@ -7,14 +7,15 @@ import ( "crypto/x509" "encoding/pem" "fmt" - "github.com/pion/dtls/v2" - "io/ioutil" "net" "net/url" + "os" "path/filepath" "strings" "time" + "github.com/pion/dtls/v2" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/globpath" @@ -176,7 +177,7 @@ func (c *X509Cert) getCert(u *url.URL, timeout time.Duration) ([]*x509.Certifica return certs, nil case "file": - content, err := ioutil.ReadFile(u.Path) + content, err := os.ReadFile(u.Path) if err != nil { return nil, err } diff --git a/plugins/inputs/x509_cert/x509_cert_test.go b/plugins/inputs/x509_cert/x509_cert_test.go index 9c42c09bdabda..f0b0379109749 100644 --- a/plugins/inputs/x509_cert/x509_cert_test.go +++ b/plugins/inputs/x509_cert/x509_cert_test.go @@ -4,8 +4,6 @@ import ( "crypto/tls" "encoding/base64" "fmt" - "github.com/pion/dtls/v2" - "io/ioutil" "math/big" "net" "net/url" @@ -15,6 +13,8 @@ import ( "testing" "time" + "github.com/pion/dtls/v2" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -32,7 +32,7 @@ var _ telegraf.Input = &X509Cert{} func TestGatherRemoteIntegration(t *testing.T) { t.Skip("Skipping network-dependent test due to race condition when test-all") - tmpfile, err := ioutil.TempFile("", 
"example") + tmpfile, err := os.CreateTemp("", "example") require.NoError(t, err) defer os.Remove(tmpfile.Name()) @@ -149,7 +149,7 @@ func TestGatherLocal(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - f, err := ioutil.TempFile("", "x509_cert") + f, err := os.CreateTemp("", "x509_cert") require.NoError(t, err) _, err = f.Write([]byte(test.content)) @@ -181,7 +181,7 @@ func TestGatherLocal(t *testing.T) { func TestTags(t *testing.T) { cert := fmt.Sprintf("%s\n%s", pki.ReadServerCert(), pki.ReadCACert()) - f, err := ioutil.TempFile("", "x509_cert") + f, err := os.CreateTemp("", "x509_cert") require.NoError(t, err) _, err = f.Write([]byte(cert)) @@ -238,7 +238,7 @@ func TestGatherChain(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - f, err := ioutil.TempFile("", "x509_cert") + f, err := os.CreateTemp("", "x509_cert") require.NoError(t, err) _, err = f.Write([]byte(test.content)) diff --git a/plugins/inputs/zfs/zfs_linux_test.go b/plugins/inputs/zfs/zfs_linux_test.go index 52622582029a5..b844759eaffd1 100644 --- a/plugins/inputs/zfs/zfs_linux_test.go +++ b/plugins/inputs/zfs/zfs_linux_test.go @@ -4,7 +4,6 @@ package zfs import ( - "io/ioutil" "os" "testing" @@ -192,10 +191,10 @@ func TestZfsPoolMetrics(t *testing.T) { err = os.MkdirAll(testKstatPath+"/HOME", 0755) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/HOME/io", []byte(poolIoContents), 0644) + err = os.WriteFile(testKstatPath+"/HOME/io", []byte(poolIoContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/arcstats", []byte(arcstatsContents), 0644) + err = os.WriteFile(testKstatPath+"/arcstats", []byte(arcstatsContents), 0644) require.NoError(t, err) poolMetrics := getPoolMetrics() @@ -231,25 +230,25 @@ func TestZfsGeneratesMetrics(t *testing.T) { err = os.MkdirAll(testKstatPath+"/HOME", 0755) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/HOME/io", []byte(""), 0644) + err = 
os.WriteFile(testKstatPath+"/HOME/io", []byte(""), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/arcstats", []byte(arcstatsContents), 0644) + err = os.WriteFile(testKstatPath+"/arcstats", []byte(arcstatsContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/zfetchstats", []byte(zfetchstatsContents), 0644) + err = os.WriteFile(testKstatPath+"/zfetchstats", []byte(zfetchstatsContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/zil", []byte(zilContents), 0644) + err = os.WriteFile(testKstatPath+"/zil", []byte(zilContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/fm", []byte(fmContents), 0644) + err = os.WriteFile(testKstatPath+"/fm", []byte(fmContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/dmu_tx", []byte(dmuTxContents), 0644) + err = os.WriteFile(testKstatPath+"/dmu_tx", []byte(dmuTxContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/abdstats", []byte(abdstatsContents), 0644) + err = os.WriteFile(testKstatPath+"/abdstats", []byte(abdstatsContents), 0644) require.NoError(t, err) intMetrics := getKstatMetricsAll() @@ -272,7 +271,7 @@ func TestZfsGeneratesMetrics(t *testing.T) { err = os.MkdirAll(testKstatPath+"/STORAGE", 0755) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/STORAGE/io", []byte(""), 0644) + err = os.WriteFile(testKstatPath+"/STORAGE/io", []byte(""), 0644) require.NoError(t, err) tags = map[string]string{ diff --git a/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go b/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go index 9bf1f3261d9f6..09518103b22cc 100644 --- a/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go +++ b/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go @@ -29,8 +29,8 @@ import ( "errors" "flag" "fmt" - "io/ioutil" "log" + "os" "github.com/apache/thrift/lib/go/thrift" 
"github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore" @@ -52,7 +52,7 @@ func init() { func main() { flag.Parse() - contents, err := ioutil.ReadFile(filename) + contents, err := os.ReadFile(filename) if err != nil { log.Fatalf("Error reading file: %v\n", err) } @@ -63,7 +63,7 @@ func main() { if err != nil { log.Fatalf("%v\n", err) } - if err := ioutil.WriteFile(outFileName, raw, 0644); err != nil { + if err := os.WriteFile(outFileName, raw, 0644); err != nil { log.Fatalf("%v", err) } case "thrift": @@ -71,7 +71,7 @@ func main() { if err != nil { log.Fatalf("%v\n", err) } - if err := ioutil.WriteFile(outFileName, raw, 0644); err != nil { + if err := os.WriteFile(outFileName, raw, 0644); err != nil { log.Fatalf("%v", err) } default: diff --git a/plugins/inputs/zipkin/codec/thrift/thrift_test.go b/plugins/inputs/zipkin/codec/thrift/thrift_test.go index d4bbc1d54df20..ea566e4bfd0c8 100644 --- a/plugins/inputs/zipkin/codec/thrift/thrift_test.go +++ b/plugins/inputs/zipkin/codec/thrift/thrift_test.go @@ -1,7 +1,7 @@ package thrift import ( - "io/ioutil" + "os" "testing" "github.com/google/go-cmp/cmp" @@ -193,7 +193,7 @@ func TestUnmarshalThrift(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - dat, err := ioutil.ReadFile(tt.filename) + dat, err := os.ReadFile(tt.filename) if err != nil { t.Fatalf("Could not find file %s\n", tt.filename) } diff --git a/plugins/inputs/zipkin/handler.go b/plugins/inputs/zipkin/handler.go index 24e7ac12f01be..83288bd6e4b2e 100644 --- a/plugins/inputs/zipkin/handler.go +++ b/plugins/inputs/zipkin/handler.go @@ -3,7 +3,7 @@ package zipkin import ( "compress/gzip" "fmt" - "io/ioutil" + "io" "mime" "net/http" "strings" @@ -88,7 +88,7 @@ func (s *SpanHandler) Spans(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusUnsupportedMediaType) } - octets, err := ioutil.ReadAll(body) + octets, err := io.ReadAll(body) if err != nil { s.recorder.Error(err) 
w.WriteHeader(http.StatusInternalServerError) diff --git a/plugins/inputs/zipkin/handler_test.go b/plugins/inputs/zipkin/handler_test.go index b0176a22ead3c..f6e8bece80240 100644 --- a/plugins/inputs/zipkin/handler_test.go +++ b/plugins/inputs/zipkin/handler_test.go @@ -2,9 +2,10 @@ package zipkin import ( "bytes" - "io/ioutil" + "io" "net/http" "net/http/httptest" + "os" "strconv" "testing" "time" @@ -28,7 +29,7 @@ func (m *MockRecorder) Error(err error) { } func TestSpanHandler(t *testing.T) { - dat, err := ioutil.ReadFile("testdata/threespans.dat") + dat, err := os.ReadFile("testdata/threespans.dat") if err != nil { t.Fatalf("Could not find file %s\n", "testdata/threespans.dat") } @@ -37,7 +38,7 @@ func TestSpanHandler(t *testing.T) { r := httptest.NewRequest( "POST", "http://server.local/api/v1/spans", - ioutil.NopCloser( + io.NopCloser( bytes.NewReader(dat))) r.Header.Set("Content-Type", "application/x-thrift") diff --git a/plugins/inputs/zipkin/zipkin_test.go b/plugins/inputs/zipkin/zipkin_test.go index 77bef853b7e52..0c0bab279cc7f 100644 --- a/plugins/inputs/zipkin/zipkin_test.go +++ b/plugins/inputs/zipkin/zipkin_test.go @@ -3,8 +3,8 @@ package zipkin import ( "bytes" "fmt" - "io/ioutil" "net/http" + "os" "testing" "time" @@ -637,7 +637,7 @@ func TestZipkinPlugin(t *testing.T) { } func postThriftData(datafile, address, contentType string) error { - dat, err := ioutil.ReadFile(datafile) + dat, err := os.ReadFile(datafile) if err != nil { return fmt.Errorf("could not read from data file %s", datafile) } diff --git a/plugins/outputs/azure_monitor/azure_monitor.go b/plugins/outputs/azure_monitor/azure_monitor.go index e513dbdca23e9..ca511a5211860 100644 --- a/plugins/outputs/azure_monitor/azure_monitor.go +++ b/plugins/outputs/azure_monitor/azure_monitor.go @@ -7,7 +7,7 @@ import ( "encoding/json" "fmt" "hash/fnv" - "io/ioutil" + "io" "net/http" "regexp" "strings" @@ -221,7 +221,7 @@ func vmInstanceMetadata(c *http.Client) (string, string, error) { } defer 
resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return "", "", err } @@ -356,7 +356,7 @@ func (a *AzureMonitor) send(body []byte) error { } defer resp.Body.Close() - _, err = ioutil.ReadAll(resp.Body) + _, err = io.ReadAll(resp.Body) if err != nil || resp.StatusCode < 200 || resp.StatusCode > 299 { return fmt.Errorf("failed to write batch: [%v] %s", resp.StatusCode, resp.Status) } diff --git a/plugins/outputs/dynatrace/dynatrace.go b/plugins/outputs/dynatrace/dynatrace.go index 11796e8e12994..adf74ea48a232 100644 --- a/plugins/outputs/dynatrace/dynatrace.go +++ b/plugins/outputs/dynatrace/dynatrace.go @@ -3,7 +3,7 @@ package dynatrace import ( "bytes" "fmt" - "io/ioutil" + "io" "net/http" "strings" "time" @@ -209,7 +209,7 @@ func (d *Dynatrace) send(msg string) error { } // print metric line results as info log - bodyBytes, err := ioutil.ReadAll(resp.Body) + bodyBytes, err := io.ReadAll(resp.Body) if err != nil { d.Log.Errorf("Dynatrace error reading response") } diff --git a/plugins/outputs/dynatrace/dynatrace_test.go b/plugins/outputs/dynatrace/dynatrace_test.go index c3cb091cbf549..0ed7cf4cf1195 100644 --- a/plugins/outputs/dynatrace/dynatrace_test.go +++ b/plugins/outputs/dynatrace/dynatrace_test.go @@ -3,7 +3,7 @@ package dynatrace import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "regexp" @@ -130,7 +130,7 @@ func TestSendMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) + bodyBytes, err := io.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) @@ -209,7 +209,7 @@ func TestSendMetrics(t *testing.T) { func TestSendSingleMetricWithUnorderedTags(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // check the encoded result - bodyBytes, err := 
ioutil.ReadAll(r.Body) + bodyBytes, err := io.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) // use regex because dimension order isn't guaranteed @@ -255,7 +255,7 @@ func TestSendMetricWithoutTags(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) + bodyBytes, err := io.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) expected := "mymeasurement.myfield,dt.metrics.source=telegraf gauge,3.14 1289430000000" @@ -296,7 +296,7 @@ func TestSendMetricWithUpperCaseTagKeys(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) + bodyBytes, err := io.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) @@ -343,7 +343,7 @@ func TestSendBooleanMetricWithoutTags(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) + bodyBytes, err := io.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) // use regex because field order isn't guaranteed @@ -384,7 +384,7 @@ func TestSendMetricWithDefaultDimensions(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) + bodyBytes, err := io.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) // use regex because field order isn't guaranteed @@ -427,7 +427,7 @@ func TestMetricDimensionsOverrideDefault(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) // check the 
encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) + bodyBytes, err := io.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) // use regex because field order isn't guaranteed @@ -470,7 +470,7 @@ func TestStaticDimensionsOverrideMetric(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) + bodyBytes, err := io.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) // use regex because field order isn't guaranteed diff --git a/plugins/outputs/file/file_test.go b/plugins/outputs/file/file_test.go index f1e87853d6153..5fcdc511972ac 100644 --- a/plugins/outputs/file/file_test.go +++ b/plugins/outputs/file/file_test.go @@ -3,7 +3,6 @@ package file import ( "bytes" "io" - "io/ioutil" "os" "testing" @@ -181,7 +180,7 @@ func TestFileStdout(t *testing.T) { } func createFile() *os.File { - f, err := ioutil.TempFile("", "") + f, err := os.CreateTemp("", "") if err != nil { panic(err) } @@ -190,7 +189,7 @@ func createFile() *os.File { } func tmpFile() string { - d, err := ioutil.TempDir("", "") + d, err := os.MkdirTemp("", "") if err != nil { panic(err) } @@ -198,7 +197,7 @@ func tmpFile() string { } func validateFile(fname, expS string, t *testing.T) { - buf, err := ioutil.ReadFile(fname) + buf, err := os.ReadFile(fname) if err != nil { panic(err) } diff --git a/plugins/outputs/health/health_test.go b/plugins/outputs/health/health_test.go index f03cfcacba7a6..03a08fca21e7b 100644 --- a/plugins/outputs/health/health_test.go +++ b/plugins/outputs/health/health_test.go @@ -1,7 +1,7 @@ package health_test import ( - "io/ioutil" + "io" "net/http" "testing" "time" @@ -121,7 +121,7 @@ func TestHealth(t *testing.T) { require.NoError(t, err) require.Equal(t, tt.expectedCode, resp.StatusCode) - _, err = ioutil.ReadAll(resp.Body) + _, err = io.ReadAll(resp.Body) require.NoError(t, err) err = 
output.Close() diff --git a/plugins/outputs/http/http.go b/plugins/outputs/http/http.go index edaae3f6ec07d..c94052ea92c1c 100644 --- a/plugins/outputs/http/http.go +++ b/plugins/outputs/http/http.go @@ -6,7 +6,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "net/http" "strings" "time" @@ -195,7 +194,7 @@ func (h *HTTP) write(reqBody []byte) error { return fmt.Errorf("when writing to [%s] received status code: %d. body: %s", h.URL, resp.StatusCode, errorLine) } - _, err = ioutil.ReadAll(resp.Body) + _, err = io.ReadAll(resp.Body) if err != nil { return fmt.Errorf("when writing to [%s] received error: %v", h.URL, err) } diff --git a/plugins/outputs/http/http_test.go b/plugins/outputs/http/http_test.go index 8089f45f59f2e..d6803eed3211d 100644 --- a/plugins/outputs/http/http_test.go +++ b/plugins/outputs/http/http_test.go @@ -3,7 +3,7 @@ package http import ( "compress/gzip" "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "net/url" @@ -272,7 +272,7 @@ func TestContentEncodingGzip(t *testing.T) { require.NoError(t, err) } - payload, err := ioutil.ReadAll(body) + payload, err := io.ReadAll(body) require.NoError(t, err) require.Contains(t, string(payload), "cpu value=42") diff --git a/plugins/outputs/influxdb/http.go b/plugins/outputs/influxdb/http.go index 5c11d2821d2f1..ac85814db1f34 100644 --- a/plugins/outputs/influxdb/http.go +++ b/plugins/outputs/influxdb/http.go @@ -8,7 +8,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net" "net/http" "net/url" @@ -489,7 +488,7 @@ func (c *httpClient) requestBodyReader(metrics []telegraf.Metric) (io.ReadCloser return rc, nil } - return ioutil.NopCloser(reader), nil + return io.NopCloser(reader), nil } func (c *httpClient) addHeaders(req *http.Request) { @@ -503,13 +502,13 @@ func (c *httpClient) addHeaders(req *http.Request) { } func (c *httpClient) validateResponse(response io.ReadCloser) (io.ReadCloser, error) { - bodyBytes, err := ioutil.ReadAll(response) + bodyBytes, err := io.ReadAll(response) if err != nil { 
return nil, err } defer response.Close() - originalResponse := ioutil.NopCloser(bytes.NewBuffer(bodyBytes)) + originalResponse := io.NopCloser(bytes.NewBuffer(bodyBytes)) // Empty response is valid. if response == http.NoBody || len(bodyBytes) == 0 || bodyBytes == nil { diff --git a/plugins/outputs/influxdb/http_test.go b/plugins/outputs/influxdb/http_test.go index e19d8d2e580c9..ba4dd2d81b12a 100644 --- a/plugins/outputs/influxdb/http_test.go +++ b/plugins/outputs/influxdb/http_test.go @@ -6,7 +6,7 @@ import ( "compress/gzip" "context" "fmt" - "io/ioutil" + "io" "log" "net" "net/http" @@ -284,7 +284,7 @@ func TestHTTP_Write(t *testing.T) { }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, r.FormValue("db"), "telegraf") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Contains(t, string(body), "cpu value=42") w.WriteHeader(http.StatusNoContent) @@ -573,7 +573,7 @@ func TestHTTP_WriteContentEncodingGzip(t *testing.T) { gr, err := gzip.NewReader(r.Body) require.NoError(t, err) - body, err := ioutil.ReadAll(gr) + body, err := io.ReadAll(gr) require.NoError(t, err) require.Contains(t, string(body), "cpu value=42") @@ -618,7 +618,7 @@ func TestHTTP_WriteContentEncodingGzip(t *testing.T) { } func TestHTTP_UnixSocket(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf-test") + tmpdir, err := os.MkdirTemp("", "telegraf-test") if err != nil { require.NoError(t, err) } @@ -700,7 +700,7 @@ func TestHTTP_WriteDatabaseTagWorksOnRetry(t *testing.T) { r.ParseForm() require.Equal(t, r.Form["db"], []string{"foo"}) - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Contains(t, string(body), "cpu value=42") @@ -835,7 +835,7 @@ func TestDBRPTags(t *testing.T) { handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, r.FormValue("db"), "telegraf") require.Equal(t, r.FormValue("rp"), 
"foo") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Contains(t, string(body), "cpu,rp=foo value=42") w.WriteHeader(http.StatusNoContent) @@ -917,7 +917,7 @@ func TestDBRPTags(t *testing.T) { handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, r.FormValue("db"), "telegraf") require.Equal(t, r.FormValue("rp"), "foo") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Contains(t, string(body), "cpu value=42") w.WriteHeader(http.StatusNoContent) @@ -948,7 +948,7 @@ func TestDBRPTags(t *testing.T) { handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, r.FormValue("db"), "telegraf") require.Equal(t, r.FormValue("rp"), "foo") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Contains(t, string(body), "cpu,rp=foo value=42") w.WriteHeader(http.StatusNoContent) diff --git a/plugins/outputs/influxdb_v2/http.go b/plugins/outputs/influxdb_v2/http.go index e8df4da7d2041..c076580255740 100644 --- a/plugins/outputs/influxdb_v2/http.go +++ b/plugins/outputs/influxdb_v2/http.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "log" "math" "net" @@ -361,7 +360,7 @@ func (c *httpClient) requestBodyReader(metrics []telegraf.Metric) (io.ReadCloser return rc, nil } - return ioutil.NopCloser(reader), nil + return io.NopCloser(reader), nil } func (c *httpClient) addHeaders(req *http.Request) { diff --git a/plugins/outputs/influxdb_v2/http_test.go b/plugins/outputs/influxdb_v2/http_test.go index 23c3ff05e17b6..0637cd8060bd0 100644 --- a/plugins/outputs/influxdb_v2/http_test.go +++ b/plugins/outputs/influxdb_v2/http_test.go @@ -2,7 +2,7 @@ package influxdb_v2_test import ( "context" - "io/ioutil" + "io" "net/http" "net/http/httptest" "net/url" @@ -63,7 +63,7 @@ func TestWriteBucketTagWorksOnRetry(t *testing.T) { r.ParseForm() require.Equal(t, 
r.Form["bucket"], []string{"foo"}) - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Contains(t, string(body), "cpu value=42") diff --git a/plugins/outputs/librato/librato.go b/plugins/outputs/librato/librato.go index d4aa3e6e92bb7..dc1e9b6fa7856 100644 --- a/plugins/outputs/librato/librato.go +++ b/plugins/outputs/librato/librato.go @@ -4,7 +4,7 @@ import ( "bytes" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "regexp" "time" @@ -151,7 +151,7 @@ func (l *Librato) Write(metrics []telegraf.Metric) error { defer resp.Body.Close() if resp.StatusCode != 200 || l.Debug { - htmlData, err := ioutil.ReadAll(resp.Body) + htmlData, err := io.ReadAll(resp.Body) if err != nil { l.Log.Debugf("Couldn't get response! (%v)", err) } diff --git a/plugins/outputs/loki/loki_test.go b/plugins/outputs/loki/loki_test.go index efe31728218d7..ba6d0808fabaa 100644 --- a/plugins/outputs/loki/loki_test.go +++ b/plugins/outputs/loki/loki_test.go @@ -4,14 +4,15 @@ import ( "compress/gzip" "encoding/json" "fmt" - "github.com/influxdata/telegraf/testutil" - "io/ioutil" + "io" "net/http" "net/http/httptest" "net/url" "testing" "time" + "github.com/influxdata/telegraf/testutil" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/stretchr/testify/require" @@ -215,7 +216,7 @@ func TestContentEncodingGzip(t *testing.T) { require.NoError(t, err) } - payload, err := ioutil.ReadAll(body) + payload, err := io.ReadAll(body) require.NoError(t, err) var s Request @@ -394,7 +395,7 @@ func TestMetricSorting(t *testing.T) { body := r.Body var err error - payload, err := ioutil.ReadAll(body) + payload, err := io.ReadAll(body) require.NoError(t, err) var s Request diff --git a/plugins/outputs/opentsdb/opentsdb_http.go b/plugins/outputs/opentsdb/opentsdb_http.go index b164765850578..582a9bb85fc9a 100644 --- a/plugins/outputs/opentsdb/opentsdb_http.go +++ b/plugins/outputs/opentsdb/opentsdb_http.go @@ -6,7 +6,6 @@ 
import ( "encoding/json" "fmt" "io" - "io/ioutil" "log" "net/http" "net/http/httputil" @@ -163,7 +162,7 @@ func (o *openTSDBHttp) flush() error { fmt.Printf("Received response\n%s\n\n", dump) } else { // Important so http client reuse connection for next request if need be. - io.Copy(ioutil.Discard, resp.Body) + _, _ = io.Copy(io.Discard, resp.Body) } if resp.StatusCode/100 != 2 { diff --git a/plugins/outputs/prometheus_client/prometheus_client_v1_test.go b/plugins/outputs/prometheus_client/prometheus_client_v1_test.go index 39b8fec262095..95fa97fb688b7 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_v1_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_v1_test.go @@ -2,7 +2,7 @@ package prometheus import ( "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "net/url" @@ -261,7 +261,7 @@ rpc_duration_seconds_count 2693 require.NoError(t, err) require.Equal(t, http.StatusOK, resp.StatusCode) defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) require.NoError(t, err) require.Equal(t, @@ -392,7 +392,7 @@ rpc_duration_seconds_count 2693 resp, err := http.Get(output.URL()) require.NoError(t, err) - actual, err := ioutil.ReadAll(resp.Body) + actual, err := io.ReadAll(resp.Body) require.NoError(t, err) require.Equal(t, @@ -422,7 +422,7 @@ func TestLandingPage(t *testing.T) { resp, err := http.Get(u.String()) require.NoError(t, err) - actual, err := ioutil.ReadAll(resp.Body) + actual, err := io.ReadAll(resp.Body) require.NoError(t, err) require.Equal(t, expected, strings.TrimSpace(string(actual))) diff --git a/plugins/outputs/prometheus_client/prometheus_client_v2_test.go b/plugins/outputs/prometheus_client/prometheus_client_v2_test.go index 27be9103b28bd..c5ff76d4017a7 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_v2_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_v2_test.go @@ -2,7 +2,7 @@ package prometheus import ( "fmt" - "io/ioutil" + "io" 
"net/http" "net/http/httptest" "strings" @@ -321,7 +321,7 @@ cpu_usage_idle_count{cpu="cpu1"} 20 require.NoError(t, err) require.Equal(t, http.StatusOK, resp.StatusCode) defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) require.NoError(t, err) require.Equal(t, @@ -452,7 +452,7 @@ rpc_duration_seconds_count 2693 resp, err := http.Get(output.URL()) require.NoError(t, err) - actual, err := ioutil.ReadAll(resp.Body) + actual, err := io.ReadAll(resp.Body) require.NoError(t, err) require.Equal(t, diff --git a/plugins/outputs/sensu/sensu.go b/plugins/outputs/sensu/sensu.go index 568f8f7a144e4..3cd8b2274e52a 100644 --- a/plugins/outputs/sensu/sensu.go +++ b/plugins/outputs/sensu/sensu.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "math" "net/http" "net/url" @@ -336,7 +335,7 @@ func (s *Sensu) write(reqBody []byte) error { defer resp.Body.Close() if resp.StatusCode != http.StatusCreated { - bodyData, err := ioutil.ReadAll(resp.Body) + bodyData, err := io.ReadAll(resp.Body) if err != nil { s.Log.Debugf("Couldn't read response body: %v", err) } diff --git a/plugins/outputs/sensu/sensu_test.go b/plugins/outputs/sensu/sensu_test.go index 249775727a481..e7a272ed5e149 100644 --- a/plugins/outputs/sensu/sensu_test.go +++ b/plugins/outputs/sensu/sensu_test.go @@ -3,7 +3,7 @@ package sensu import ( "encoding/json" "fmt" - "io/ioutil" + "io" "math" "net/http" "net/http/httptest" @@ -118,7 +118,7 @@ func TestConnectAndWrite(t *testing.T) { require.Equal(t, expectedURL, r.URL.String()) require.Equal(t, expectedAuthHeader, r.Header.Get("Authorization")) // let's make sure what we received is a valid Sensu event that contains all of the expected data - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) receivedEvent := &corev2.Event{} err = json.Unmarshal(body, receivedEvent) diff --git a/plugins/outputs/socket_writer/socket_writer_test.go 
b/plugins/outputs/socket_writer/socket_writer_test.go index 3c20583e15e20..0decb644cccab 100644 --- a/plugins/outputs/socket_writer/socket_writer_test.go +++ b/plugins/outputs/socket_writer/socket_writer_test.go @@ -2,7 +2,6 @@ package socket_writer import ( "bufio" - "io/ioutil" "net" "os" "path/filepath" @@ -46,7 +45,7 @@ func TestSocketWriter_udp(t *testing.T) { } func TestSocketWriter_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "sw.TestSocketWriter_unix.sock") @@ -71,7 +70,7 @@ func TestSocketWriter_unixgram(t *testing.T) { t.Skip("Skipping on Windows, as unixgram sockets are not supported") } - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "sw.TSW_unixgram.sock") diff --git a/plugins/outputs/sql/sql_test.go b/plugins/outputs/sql/sql_test.go index 5dad6752d4cfe..ef02c89b11fad 100644 --- a/plugins/outputs/sql/sql_test.go +++ b/plugins/outputs/sql/sql_test.go @@ -3,7 +3,6 @@ package sql import ( "context" "fmt" - "io/ioutil" "math/rand" "os" "path/filepath" @@ -162,7 +161,7 @@ func TestMysqlIntegration(t *testing.T) { const username = "root" password := pwgen(32) - outDir, err := ioutil.TempDir("", "tg-mysql-*") + outDir, err := os.MkdirTemp("", "tg-mysql-*") require.NoError(t, err) defer os.RemoveAll(outDir) @@ -230,9 +229,9 @@ func TestMysqlIntegration(t *testing.T) { require.FileExists(t, dumpfile) //compare the dump to what we expected - expected, err := ioutil.ReadFile("testdata/mariadb/expected.sql") + expected, err := os.ReadFile("testdata/mariadb/expected.sql") require.NoError(t, err) - actual, err := ioutil.ReadFile(dumpfile) + actual, err := os.ReadFile(dumpfile) require.NoError(t, err) require.Equal(t, string(expected), string(actual)) } @@ -252,7 +251,7 @@ func 
TestPostgresIntegration(t *testing.T) { const username = "postgres" password := pwgen(32) - outDir, err := ioutil.TempDir("", "tg-postgres-*") + outDir, err := os.MkdirTemp("", "tg-postgres-*") require.NoError(t, err) defer os.RemoveAll(outDir) @@ -329,9 +328,9 @@ func TestPostgresIntegration(t *testing.T) { require.FileExists(t, dumpfile) //compare the dump to what we expected - expected, err := ioutil.ReadFile("testdata/postgres/expected.sql") + expected, err := os.ReadFile("testdata/postgres/expected.sql") require.NoError(t, err) - actual, err := ioutil.ReadFile(dumpfile) + actual, err := os.ReadFile(dumpfile) require.NoError(t, err) require.Equal(t, string(expected), string(actual)) } diff --git a/plugins/outputs/sql/sqlite_test.go b/plugins/outputs/sql/sqlite_test.go index d54ffe877a80f..7707f9d085e7e 100644 --- a/plugins/outputs/sql/sqlite_test.go +++ b/plugins/outputs/sql/sqlite_test.go @@ -7,7 +7,6 @@ package sql import ( gosql "database/sql" - "io/ioutil" "os" "path/filepath" "testing" @@ -18,7 +17,7 @@ import ( ) func TestSqlite(t *testing.T) { - outDir, err := ioutil.TempDir("", "tg-sqlite-*") + outDir, err := os.MkdirTemp("", "tg-sqlite-*") require.NoError(t, err) defer os.RemoveAll(outDir) diff --git a/plugins/outputs/sumologic/sumologic_test.go b/plugins/outputs/sumologic/sumologic_test.go index 5ce502bab2c0e..5629defa4506e 100644 --- a/plugins/outputs/sumologic/sumologic_test.go +++ b/plugins/outputs/sumologic/sumologic_test.go @@ -6,7 +6,6 @@ import ( "compress/gzip" "fmt" "io" - "io/ioutil" "net/http" "net/http/httptest" "net/url" @@ -300,7 +299,7 @@ func TestContentEncodingGzip(t *testing.T) { body, err := gzip.NewReader(r.Body) require.NoError(t, err) - payload, err := ioutil.ReadAll(body) + payload, err := io.ReadAll(body) require.NoError(t, err) assert.Equal(t, string(payload), "metric=cpu field=value 42 0\n") diff --git a/plugins/outputs/warp10/warp10.go b/plugins/outputs/warp10/warp10.go index 7826047d7873d..4d3027b1b5331 100644 --- 
a/plugins/outputs/warp10/warp10.go +++ b/plugins/outputs/warp10/warp10.go @@ -3,7 +3,7 @@ package warp10 import ( "bytes" "fmt" - "io/ioutil" + "io" "log" "math" "net/http" @@ -154,7 +154,7 @@ func (w *Warp10) Write(metrics []telegraf.Metric) error { if resp.StatusCode != http.StatusOK { if w.PrintErrorBody { - body, _ := ioutil.ReadAll(resp.Body) + body, _ := io.ReadAll(resp.Body) return fmt.Errorf(w.WarpURL + ": " + w.HandleError(string(body), w.MaxStringErrorSize)) } diff --git a/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go index c6eb9db2ae5b5..dc097da45ac2a 100644 --- a/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go +++ b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go @@ -4,7 +4,7 @@ import ( "bytes" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "time" @@ -172,7 +172,7 @@ func getResponseFromMetadata(c *http.Client, metadataURL string) ([]byte, error) } defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } @@ -242,7 +242,7 @@ func (a *YandexCloudMonitoring) send(body []byte) error { } defer resp.Body.Close() - _, err = ioutil.ReadAll(resp.Body) + _, err = io.ReadAll(resp.Body) if err != nil || resp.StatusCode < 200 || resp.StatusCode > 299 { return fmt.Errorf("failed to write batch: [%v] %s", resp.StatusCode, resp.Status) } diff --git a/plugins/parsers/json_v2/parser_test.go b/plugins/parsers/json_v2/parser_test.go index f0f018034dc5b..7b34b83c0af8a 100644 --- a/plugins/parsers/json_v2/parser_test.go +++ b/plugins/parsers/json_v2/parser_test.go @@ -3,7 +3,6 @@ package json_v2_test import ( "bufio" "fmt" - "io/ioutil" "os" "testing" @@ -90,7 +89,7 @@ func TestData(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { // Process the telegraf config file for the test - buf, err := 
ioutil.ReadFile(fmt.Sprintf("testdata/%s/telegraf.conf", tc.test)) + buf, err := os.ReadFile(fmt.Sprintf("testdata/%s/telegraf.conf", tc.test)) require.NoError(t, err) inputs.Add("file", func() telegraf.Input { return &file.File{} diff --git a/plugins/parsers/prometheus/parser_test.go b/plugins/parsers/prometheus/parser_test.go index f53b926bda4a5..a403887e093b9 100644 --- a/plugins/parsers/prometheus/parser_test.go +++ b/plugins/parsers/prometheus/parser_test.go @@ -2,7 +2,7 @@ package prometheus import ( "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "testing" @@ -435,7 +435,7 @@ func TestParserProtobufHeader(t *testing.T) { t.Fatalf("error making HTTP request to %s: %s", ts.URL, err) } defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { t.Fatalf("error reading body: %s", err) } diff --git a/plugins/parsers/xpath/parser_test.go b/plugins/parsers/xpath/parser_test.go index 8e7a3087c0888..ead02e0392769 100644 --- a/plugins/parsers/xpath/parser_test.go +++ b/plugins/parsers/xpath/parser_test.go @@ -1,7 +1,7 @@ package xpath import ( - "io/ioutil" + "os" "path/filepath" "strings" "testing" @@ -1233,7 +1233,7 @@ func TestTestCases(t *testing.T) { pbmsgtype = protofields[1] } - content, err := ioutil.ReadFile(datafile) + content, err := os.ReadFile(datafile) require.NoError(t, err) // Get the expectations @@ -1266,7 +1266,7 @@ func TestTestCases(t *testing.T) { } func loadTestConfiguration(filename string) (*Config, []string, error) { - buf, err := ioutil.ReadFile(filename) + buf, err := os.ReadFile(filename) if err != nil { return nil, nil, err } diff --git a/plugins/processors/starlark/starlark_test.go b/plugins/processors/starlark/starlark_test.go index 15152a2f349c3..9eed069948bb0 100644 --- a/plugins/processors/starlark/starlark_test.go +++ b/plugins/processors/starlark/starlark_test.go @@ -3,7 +3,6 @@ package starlark import ( "errors" "fmt" - "io/ioutil" "os" "path/filepath" "strings" 
@@ -3194,7 +3193,7 @@ func TestAllScriptTestData(t *testing.T) { } fn := path t.Run(fn, func(t *testing.T) { - b, err := ioutil.ReadFile(fn) + b, err := os.ReadFile(fn) require.NoError(t, err) lines := strings.Split(string(b), "\n") inputMetrics := parseMetricsFrom(t, lines, "Example Input:") diff --git a/testutil/tls.go b/testutil/tls.go index 68a244a8b1e74..686f327d06f49 100644 --- a/testutil/tls.go +++ b/testutil/tls.go @@ -2,7 +2,7 @@ package testutil import ( "fmt" - "io/ioutil" + "io" "os" "path" @@ -93,7 +93,7 @@ func readCertificate(filename string) string { if err != nil { panic(fmt.Sprintf("opening %q: %v", filename, err)) } - octets, err := ioutil.ReadAll(file) + octets, err := io.ReadAll(file) if err != nil { panic(fmt.Sprintf("reading %q: %v", filename, err)) } From 56398237c4a931dba438db553a1ee1487926c072 Mon Sep 17 00:00:00 2001 From: helotpl Date: Tue, 28 Sep 2021 23:24:08 +0200 Subject: [PATCH 091/176] feat: telegraf to merge tables with different indexes (#9241) --- plugins/inputs/snmp/README.md | 107 +++++++++++ plugins/inputs/snmp/snmp.go | 65 +++++++ plugins/inputs/snmp/snmp_test.go | 248 ++++++++++++++++++++++++++ plugins/inputs/snmp/testdata/test.mib | 39 ++++ 4 files changed, 459 insertions(+) diff --git a/plugins/inputs/snmp/README.md b/plugins/inputs/snmp/README.md index 0d52881a72f04..3728cddb34349 100644 --- a/plugins/inputs/snmp/README.md +++ b/plugins/inputs/snmp/README.md @@ -201,6 +201,113 @@ One [metric][] is created for each row of the SNMP table. ## Specifies if the value of given field should be snmptranslated ## by default no field values are translated # translate = true + + ## Secondary index table allows to merge data from two tables with + ## different index that this filed will be used to join them. There can + ## be only one secondary index table. + # secondary_index_table = false + + ## This field is using secondary index, and will be later merged with + ## primary index using SecondaryIndexTable. 
SecondaryIndexTable and + ## SecondaryIndexUse are exclusive. + # secondary_index_use = false + + ## Controls if entries from secondary table should be added or not + ## if joining index is present or not. If set to true, means that join + ## is outer, and index is prepended with "Secondary." for missing values + ## to avoid overlapping indexes from both tables. Can be set per field or + ## globally with SecondaryIndexTable, global true overrides per field false. + # secondary_outer_join = false +``` + +##### Two Table Join +The SNMP plugin can join two SNMP tables that have different indexes. For this to work one table +should have a translation field that returns the index of the second table as its value. Examples +of such fields are: + * Cisco portTable with translation field: `CISCO-STACK-MIB::portIfIndex`, +which value is IfIndex from ifTable + * Adva entityFacilityTable with translation field: `ADVA-FSPR7-MIB::entityFacilityOneIndex`, +which value is IfIndex from ifTable + * Cisco cpeExtPsePortTable with translation field: `CISCO-POWER-ETHERNET-EXT-MIB::cpeExtPsePortEntPhyIndex`, +which value is index from entPhysicalTable + +Such a field can be used to translate an index to the secondary table with `secondary_index_table = true` +and all fields from the secondary table (with index pointed from translation field), should have the added option +`secondary_index_use = true`. Telegraf cannot duplicate entries during join so translation +must be 1-to-1 (not 1-to-many). To add fields from the secondary table with an index that is not present +in the translation table (outer join), there is a second option for translation index `secondary_outer_join = true`.
+ +###### Example configuration for table joins + +CISCO-POWER-ETHERNET-EXT-MIB table before join: +``` +[[inputs.snmp.table]] +name = "ciscoPower" +index_as_tag = true + +[[inputs.snmp.table.field]] +name = "PortPwrConsumption" +oid = "CISCO-POWER-ETHERNET-EXT-MIB::cpeExtPsePortPwrConsumption" + +[[inputs.snmp.table.field]] +name = "EntPhyIndex" +oid = "CISCO-POWER-ETHERNET-EXT-MIB::cpeExtPsePortEntPhyIndex" +``` + +Partial result (removed agent_host and host columns from all following outputs in this section): +``` +> ciscoPower,index=1.2 EntPhyIndex=1002i,PortPwrConsumption=6643i 1621460628000000000 +> ciscoPower,index=1.6 EntPhyIndex=1006i,PortPwrConsumption=10287i 1621460628000000000 +> ciscoPower,index=1.5 EntPhyIndex=1005i,PortPwrConsumption=8358i 1621460628000000000 +``` + +Note here that EntPhyIndex column carries index from ENTITY-MIB table, config for it: +``` +[[inputs.snmp.table]] +name = "entityTable" +index_as_tag = true + +[[inputs.snmp.table.field]] +name = "EntPhysicalName" +oid = "ENTITY-MIB::entPhysicalName" +``` +Partial result: +``` +> entityTable,index=1006 EntPhysicalName="GigabitEthernet1/6" 1621460809000000000 +> entityTable,index=1002 EntPhysicalName="GigabitEthernet1/2" 1621460809000000000 +> entityTable,index=1005 EntPhysicalName="GigabitEthernet1/5" 1621460809000000000 +``` + +Now, lets attempt to join these results into one table. EntPhyIndex matches index +from second table, and lets convert EntPhysicalName into tag, so second table will +only provide tags into result. 
Configuration: + +``` +[[inputs.snmp.table]] +name = "ciscoPowerEntity" +index_as_tag = true + +[[inputs.snmp.table.field]] +name = "PortPwrConsumption" +oid = "CISCO-POWER-ETHERNET-EXT-MIB::cpeExtPsePortPwrConsumption" + +[[inputs.snmp.table.field]] +name = "EntPhyIndex" +oid = "CISCO-POWER-ETHERNET-EXT-MIB::cpeExtPsePortEntPhyIndex" +secondary_index_table = true # enables joining + +[[inputs.snmp.table.field]] +name = "EntPhysicalName" +oid = "ENTITY-MIB::entPhysicalName" +secondary_index_use = true # this tag is indexed from secondary table +is_tag = true +``` + +Result: +``` +> ciscoPowerEntity,EntPhysicalName=GigabitEthernet1/2,index=1.2 EntPhyIndex=1002i,PortPwrConsumption=6643i 1621461148000000000 +> ciscoPowerEntity,EntPhysicalName=GigabitEthernet1/6,index=1.6 EntPhyIndex=1006i,PortPwrConsumption=10287i 1621461148000000000 +> ciscoPowerEntity,EntPhysicalName=GigabitEthernet1/5,index=1.5 EntPhyIndex=1005i,PortPwrConsumption=8358i 1621461148000000000 ``` ### Troubleshooting diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 7f2df6b689eac..a2259e88179c2 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -187,11 +187,18 @@ func (t *Table) Init() error { return err } + secondaryIndexTablePresent := false // initialize all the nested fields for i := range t.Fields { if err := t.Fields[i].init(); err != nil { return fmt.Errorf("initializing field %s: %w", t.Fields[i].Name, err) } + if t.Fields[i].SecondaryIndexTable { + if secondaryIndexTablePresent { + return fmt.Errorf("only one field can be SecondaryIndexTable") + } + secondaryIndexTablePresent = true + } } t.initialized = true @@ -252,6 +259,19 @@ type Field struct { Conversion string // Translate tells if the value of the field should be snmptranslated Translate bool + // Secondary index table allows to merge data from two tables with different index + // that this field will be used to join them. There can be only one secondary index table. + SecondaryIndexTable bool + // This field is using secondary index, and will be later merged with primary index + // using SecondaryIndexTable. SecondaryIndexTable and SecondaryIndexUse are exclusive. + SecondaryIndexUse bool + // Controls if entries from secondary table should be added or not if joining + // index is present or not. If set to true, means that join is outer, and + // index is prepended with "Secondary." for missing values to avoid overlapping + // indexes from both tables. + // Can be set per field or globally with SecondaryIndexTable, global true overrides + // per field false. + SecondaryOuterJoin bool initialized bool } @@ -278,6 +298,14 @@ func (f *Field) init() error { //TODO use textual convention conversion from the MIB } + if f.SecondaryIndexTable && f.SecondaryIndexUse { + return fmt.Errorf("SecondaryIndexTable and UseSecondaryIndex are exclusive") + } + + if !f.SecondaryIndexTable && !f.SecondaryIndexUse && f.SecondaryOuterJoin { + return fmt.Errorf("SecondaryOuterJoin set to true, but field is not being used in join") + } + f.initialized = true return nil } @@ -414,6 +442,19 @@ func (s *Snmp) gatherTable(acc telegraf.Accumulator, gs snmpConnection, t Table, func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) { rows := map[string]RTableRow{} + //translation table for secondary index (when performing join on two tables) + secIdxTab := make(map[string]string) + secGlobalOuterJoin := false + for i, f := range t.Fields { + if f.SecondaryIndexTable { + secGlobalOuterJoin = f.SecondaryOuterJoin + if i != 0 { + t.Fields[0], t.Fields[i] = t.Fields[i], t.Fields[0] + } + break + } + } + tagCount := 0 for _, f := range t.Fields { if f.IsTag { @@ -519,6 +560,16 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) { } for idx, v := range ifv { + if f.SecondaryIndexUse { + if newidx, ok := secIdxTab[idx]; ok { + idx = newidx + } else { + if !secGlobalOuterJoin && !f.SecondaryOuterJoin { + continue + } + idx = 
".Secondary" + idx + } + } rtr, ok := rows[idx] if !ok { rtr = RTableRow{} @@ -543,6 +594,20 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) { } else { rtr.Fields[f.Name] = v } + if f.SecondaryIndexTable { + //indexes are stored here with prepending "." so we need to add them if needed + var vss string + if ok { + vss = "." + vs + } else { + vss = fmt.Sprintf(".%v", v) + } + if idx[0] == '.' { + secIdxTab[vss] = idx + } else { + secIdxTab[vss] = "." + idx + } + } } } } diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index f447f13c54e67..49c9bf381b107 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -81,6 +81,15 @@ var tsc = &testSNMPConnection{ ".1.0.0.2.1.5.0.9.9": 11, ".1.0.0.2.1.5.1.9.9": 22, ".1.0.0.0.1.6.0": ".1.0.0.0.1.7", + ".1.0.0.3.1.1.10": "instance", + ".1.0.0.3.1.1.11": "instance2", + ".1.0.0.3.1.1.12": "instance3", + ".1.0.0.3.1.2.10": 10, + ".1.0.0.3.1.2.11": 20, + ".1.0.0.3.1.2.12": 20, + ".1.0.0.3.1.3.10": 1, + ".1.0.0.3.1.3.11": 2, + ".1.0.0.3.1.3.12": 3, }, } @@ -960,3 +969,242 @@ func TestSnmpTableCache_hit(t *testing.T) { assert.Equal(t, []Field{{Name: "d"}}, fields) assert.Equal(t, fmt.Errorf("e"), err) } + +func TestTableJoin_walk(t *testing.T) { + tbl := Table{ + Name: "mytable", + IndexAsTag: true, + Fields: []Field{ + { + Name: "myfield1", + Oid: ".1.0.0.3.1.1", + IsTag: true, + }, + { + Name: "myfield2", + Oid: ".1.0.0.3.1.2", + }, + { + Name: "myfield3", + Oid: ".1.0.0.3.1.3", + SecondaryIndexTable: true, + }, + { + Name: "myfield4", + Oid: ".1.0.0.0.1.1", + SecondaryIndexUse: true, + IsTag: true, + }, + { + Name: "myfield5", + Oid: ".1.0.0.0.1.2", + SecondaryIndexUse: true, + }, + }, + } + + tb, err := tbl.Build(tsc, true) + require.NoError(t, err) + + assert.Equal(t, tb.Name, "mytable") + rtr1 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance", + "myfield4": "bar", + "index": "10", + }, + Fields: map[string]interface{}{ + 
"myfield2": 10, + "myfield3": 1, + "myfield5": 2, + }, + } + rtr2 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance2", + "index": "11", + }, + Fields: map[string]interface{}{ + "myfield2": 20, + "myfield3": 2, + "myfield5": 0, + }, + } + rtr3 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance3", + "index": "12", + }, + Fields: map[string]interface{}{ + "myfield2": 20, + "myfield3": 3, + }, + } + assert.Len(t, tb.Rows, 3) + assert.Contains(t, tb.Rows, rtr1) + assert.Contains(t, tb.Rows, rtr2) + assert.Contains(t, tb.Rows, rtr3) +} + +func TestTableOuterJoin_walk(t *testing.T) { + tbl := Table{ + Name: "mytable", + IndexAsTag: true, + Fields: []Field{ + { + Name: "myfield1", + Oid: ".1.0.0.3.1.1", + IsTag: true, + }, + { + Name: "myfield2", + Oid: ".1.0.0.3.1.2", + }, + { + Name: "myfield3", + Oid: ".1.0.0.3.1.3", + SecondaryIndexTable: true, + SecondaryOuterJoin: true, + }, + { + Name: "myfield4", + Oid: ".1.0.0.0.1.1", + SecondaryIndexUse: true, + IsTag: true, + }, + { + Name: "myfield5", + Oid: ".1.0.0.0.1.2", + SecondaryIndexUse: true, + }, + }, + } + + tb, err := tbl.Build(tsc, true) + require.NoError(t, err) + + assert.Equal(t, tb.Name, "mytable") + rtr1 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance", + "myfield4": "bar", + "index": "10", + }, + Fields: map[string]interface{}{ + "myfield2": 10, + "myfield3": 1, + "myfield5": 2, + }, + } + rtr2 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance2", + "index": "11", + }, + Fields: map[string]interface{}{ + "myfield2": 20, + "myfield3": 2, + "myfield5": 0, + }, + } + rtr3 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance3", + "index": "12", + }, + Fields: map[string]interface{}{ + "myfield2": 20, + "myfield3": 3, + }, + } + rtr4 := RTableRow{ + Tags: map[string]string{ + "index": "Secondary.0", + "myfield4": "foo", + }, + Fields: map[string]interface{}{ + "myfield5": 1, + }, + } + assert.Len(t, tb.Rows, 4) + assert.Contains(t, tb.Rows, 
rtr1) + assert.Contains(t, tb.Rows, rtr2) + assert.Contains(t, tb.Rows, rtr3) + assert.Contains(t, tb.Rows, rtr4) +} + +func TestTableJoinNoIndexAsTag_walk(t *testing.T) { + tbl := Table{ + Name: "mytable", + IndexAsTag: false, + Fields: []Field{ + { + Name: "myfield1", + Oid: ".1.0.0.3.1.1", + IsTag: true, + }, + { + Name: "myfield2", + Oid: ".1.0.0.3.1.2", + }, + { + Name: "myfield3", + Oid: ".1.0.0.3.1.3", + SecondaryIndexTable: true, + }, + { + Name: "myfield4", + Oid: ".1.0.0.0.1.1", + SecondaryIndexUse: true, + IsTag: true, + }, + { + Name: "myfield5", + Oid: ".1.0.0.0.1.2", + SecondaryIndexUse: true, + }, + }, + } + + tb, err := tbl.Build(tsc, true) + require.NoError(t, err) + + assert.Equal(t, tb.Name, "mytable") + rtr1 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance", + "myfield4": "bar", + //"index": "10", + }, + Fields: map[string]interface{}{ + "myfield2": 10, + "myfield3": 1, + "myfield5": 2, + }, + } + rtr2 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance2", + //"index": "11", + }, + Fields: map[string]interface{}{ + "myfield2": 20, + "myfield3": 2, + "myfield5": 0, + }, + } + rtr3 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance3", + //"index": "12", + }, + Fields: map[string]interface{}{ + "myfield2": 20, + "myfield3": 3, + }, + } + assert.Len(t, tb.Rows, 3) + assert.Contains(t, tb.Rows, rtr1) + assert.Contains(t, tb.Rows, rtr2) + assert.Contains(t, tb.Rows, rtr3) +} diff --git a/plugins/inputs/snmp/testdata/test.mib b/plugins/inputs/snmp/testdata/test.mib index 7c3758d66d9a1..c6e7a2a8962b6 100644 --- a/plugins/inputs/snmp/testdata/test.mib +++ b/plugins/inputs/snmp/testdata/test.mib @@ -55,4 +55,43 @@ hostname OBJECT-TYPE STATUS current ::= { testOID 1 1 } +testSecondaryTable OBJECT-TYPE + SYNTAX SEQUENCE OF testSecondaryTableEntry + MAX-ACCESS not-accessible + STATUS current + ::= { testOID 3 } + +testSecondaryTableEntry OBJECT-TYPE + SYNTAX TestSecondaryTableEntry + MAX-ACCESS not-accessible + 
STATUS current + INDEX { + instance + } + ::= { testSecondaryTable 1 } + +TestSecondaryTableEntry ::= + SEQUENCE { + instance OCTET STRING, + connections INTEGER, + testTableIndex INTEGER, + } + +instance OBJECT-TYPE + SYNTAX OCTET STRING + MAX-ACCESS read-only + STATUS current + ::= { testSecondaryTableEntry 1 } + +connections OBJECT-TYPE + SYNTAX OCTET STRING + MAX-ACCESS read-only + STATUS current + ::= { testSecondaryTableEntry 2 } + +testTableIndex OBJECT-TYPE + SYNTAX OCTET STRING + MAX-ACCESS read-only + STATUS current + ::= { testSecondaryTableEntry 3 } END From e6155346203b9ecf95ef9a25994da76654e2b187 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 29 Sep 2021 07:50:44 -0600 Subject: [PATCH 092/176] feat: add debug query output to elasticsearch_query (#9827) --- .../inputs/elasticsearch_query/aggregation_query.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/plugins/inputs/elasticsearch_query/aggregation_query.go b/plugins/inputs/elasticsearch_query/aggregation_query.go index b5fa9db3c667a..51bdd98e7130b 100644 --- a/plugins/inputs/elasticsearch_query/aggregation_query.go +++ b/plugins/inputs/elasticsearch_query/aggregation_query.go @@ -2,6 +2,7 @@ package elasticsearch_query import ( "context" + "encoding/json" "fmt" "strings" "time" @@ -34,6 +35,16 @@ func (e *ElasticsearchQuery) runAggregationQuery(ctx context.Context, aggregatio query = query.Filter(elastic5.NewQueryStringQuery(filterQuery)) query = query.Filter(elastic5.NewRangeQuery(aggregation.DateField).From(from).To(now)) + src, err := query.Source() + if err != nil { + return nil, fmt.Errorf("failed to get query source - %v", err) + } + data, err := json.Marshal(src) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal response - %v", err) + } + e.Log.Debugf("{\"query\": %s}", string(data)) + search := e.esClient.Search().Index(aggregation.Index).Query(query).Size(0) // add only parent elastic.Aggregations to the search request, all the rest are subaggregations 
of these From 872b29bf958cf6c485f0d649b0540b0bae137a50 Mon Sep 17 00:00:00 2001 From: Alexander Krantz Date: Wed, 29 Sep 2021 14:39:46 -0700 Subject: [PATCH 093/176] fix: Couchbase insecure certificate validation (#9458) --- plugins/inputs/couchbase/README.md | 8 ++++++ plugins/inputs/couchbase/couchbase.go | 33 ++++++++++++++++++++-- plugins/inputs/couchbase/couchbase_test.go | 12 ++++++-- 3 files changed, 49 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/couchbase/README.md b/plugins/inputs/couchbase/README.md index 2c777e17a9ed0..1acdaea4ac76e 100644 --- a/plugins/inputs/couchbase/README.md +++ b/plugins/inputs/couchbase/README.md @@ -20,6 +20,14 @@ This plugin gets metrics for each Couchbase node, as well as detailed metrics fo ## Filter bucket fields to include only here. # bucket_stats_included = ["quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"] + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification (defaults to false) + ## If set to false, tls_cert and tls_key are required + # insecure_skip_verify = false ``` ## Measurements: diff --git a/plugins/inputs/couchbase/couchbase.go b/plugins/inputs/couchbase/couchbase.go index 7b99c76e6982c..f67e75096cde3 100644 --- a/plugins/inputs/couchbase/couchbase.go +++ b/plugins/inputs/couchbase/couchbase.go @@ -11,6 +11,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -20,6 +21,9 @@ type Couchbase struct { BucketStatsIncluded []string `toml:"bucket_stats_included"` bucketInclude filter.Filter + client *http.Client + + tls.ClientConfig } var sampleConfig = ` @@ -36,10 +40,17 @@ var sampleConfig = ` ## Filter bucket fields to include only here. 
# bucket_stats_included = ["quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"] + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification (defaults to false) + ## If set to false, tls_cert and tls_key are required + # insecure_skip_verify = false ` var regexpURI = regexp.MustCompile(`(\S+://)?(\S+\:\S+@)`) -var client = &http.Client{Timeout: 10 * time.Second} func (cb *Couchbase) SampleConfig() string { return sampleConfig @@ -369,7 +380,7 @@ func (cb *Couchbase) queryDetailedBucketStats(server, bucket string, bucketStats return err } - r, err := client.Do(req) + r, err := cb.client.Do(req) if err != nil { return err } @@ -387,6 +398,24 @@ func (cb *Couchbase) Init() error { cb.bucketInclude = f + tlsConfig, err := cb.TLSConfig() + if err != nil { + return err + } + + cb.client = &http.Client{ + Timeout: 10 * time.Second, + Transport: &http.Transport{ + MaxIdleConnsPerHost: couchbaseClient.MaxIdleConnsPerHost, + TLSClientConfig: tlsConfig, + }, + } + + couchbaseClient.SetSkipVerify(cb.ClientConfig.InsecureSkipVerify) + couchbaseClient.SetCertFile(cb.ClientConfig.TLSCert) + couchbaseClient.SetKeyFile(cb.ClientConfig.TLSKey) + couchbaseClient.SetRootFile(cb.ClientConfig.TLSCA) + return nil } diff --git a/plugins/inputs/couchbase/couchbase_test.go b/plugins/inputs/couchbase/couchbase_test.go index a739732458a51..e6abc3ea74c01 100644 --- a/plugins/inputs/couchbase/couchbase_test.go +++ b/plugins/inputs/couchbase/couchbase_test.go @@ -2,6 +2,7 @@ package couchbase import ( "encoding/json" + "github.com/influxdata/telegraf/plugins/common/tls" "net/http" "net/http/httptest" "testing" @@ -26,8 +27,12 @@ func TestGatherServer(t *testing.T) { } })) - var cb Couchbase - cb.BucketStatsIncluded = []string{"quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", 
"mem_used"} + cb := Couchbase{ + BucketStatsIncluded: []string{"quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"}, + ClientConfig: tls.ClientConfig{ + InsecureSkipVerify: true, + }, + } err := cb.Init() require.NoError(t, err) @@ -105,6 +110,9 @@ func TestGatherDetailedBucketMetrics(t *testing.T) { var err error var cb Couchbase cb.BucketStatsIncluded = []string{"couch_total_disk_size"} + cb.ClientConfig = tls.ClientConfig{ + InsecureSkipVerify: true, + } err = cb.Init() require.NoError(t, err) var acc testutil.Accumulator From 11193a3b4cbffd4ccdb7eb5b2aa12e83be729c11 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Wed, 29 Sep 2021 14:40:23 -0700 Subject: [PATCH 094/176] docs: update readme title for amd_rocm_smi (#9826) --- plugins/inputs/amd_rocm_smi/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/amd_rocm_smi/README.md b/plugins/inputs/amd_rocm_smi/README.md index 89a5b063065d7..ac080974dd274 100644 --- a/plugins/inputs/amd_rocm_smi/README.md +++ b/plugins/inputs/amd_rocm_smi/README.md @@ -1,11 +1,11 @@ -# ROCm System Management Interface (SMI) Input Plugin +# AMD ROCm System Management Interface (SMI) Input Plugin This plugin uses a query on the [`rocm-smi`](https://github.com/RadeonOpenCompute/rocm_smi_lib/tree/master/python_smi_tools) binary to pull GPU stats including memory and GPU usage, temperatures and other. 
### Configuration ```toml -# Pulls statistics from nvidia GPUs attached to the host +# Pulls statistics from AMD GPUs attached to the host [[inputs.amd_rocm_smi]] ## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath # bin_path = "/opt/rocm/bin/rocm-smi" From 70afc94d121c4bb75ded3f8177859436355c4dfa Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Thu, 30 Sep 2021 10:28:48 -0600 Subject: [PATCH 095/176] fix: Revert "Reset the flush interval timer when flush is requested or batch is ready. (#8953)" (#9800) This reverts commit a6d2c4f254dbe9f7353961d892f8b91d907423ea. --- agent/agent.go | 12 ++++++++---- agent/tick.go | 19 +++++++------------ 2 files changed, 15 insertions(+), 16 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 78097bcd47731..7bd6b108df048 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -775,7 +775,7 @@ func (a *Agent) runOutputs( func (a *Agent) flushLoop( ctx context.Context, output *models.RunningOutput, - ticker *RollingTicker, + ticker Ticker, ) { logError := func(err error) { if err != nil { @@ -804,11 +804,15 @@ func (a *Agent) flushLoop( case <-ticker.Elapsed(): logError(a.flushOnce(output, ticker, output.Write)) case <-flushRequested: - ticker.Reset() logError(a.flushOnce(output, ticker, output.Write)) case <-output.BatchReady: - ticker.Reset() - logError(a.flushOnce(output, ticker, output.WriteBatch)) + // Favor the ticker over batch ready + select { + case <-ticker.Elapsed(): + logError(a.flushOnce(output, ticker, output.Write)) + default: + logError(a.flushOnce(output, ticker, output.WriteBatch)) + } } } } diff --git a/agent/tick.go b/agent/tick.go index 9696cd2c18c16..16233ba6d4adb 100644 --- a/agent/tick.go +++ b/agent/tick.go @@ -214,7 +214,6 @@ type RollingTicker struct { ch chan time.Time cancel context.CancelFunc wg sync.WaitGroup - timer *clock.Timer } func NewRollingTicker(interval, jitter time.Duration) *RollingTicker { @@ -231,12 +230,12 @@ func newRollingTicker(interval, jitter 
time.Duration, clock clock.Clock) *Rollin } d := t.next() - t.timer = clock.Timer(d) + timer := clock.Timer(d) t.wg.Add(1) go func() { defer t.wg.Done() - t.run(ctx) + t.run(ctx, timer) }() return t @@ -246,28 +245,24 @@ func (t *RollingTicker) next() time.Duration { return t.interval + internal.RandomDuration(t.jitter) } -func (t *RollingTicker) run(ctx context.Context) { +func (t *RollingTicker) run(ctx context.Context, timer *clock.Timer) { for { select { case <-ctx.Done(): - t.timer.Stop() + timer.Stop() return - case now := <-t.timer.C: + case now := <-timer.C: select { case t.ch <- now: default: } - t.Reset() + d := t.next() + timer.Reset(d) } } } -// Reset the ticker to the next interval + jitter. -func (t *RollingTicker) Reset() { - t.timer.Reset(t.next()) -} - func (t *RollingTicker) Elapsed() <-chan time.Time { return t.ch } From 3990ab5eb9047c99b03a40afd3f02a90e7aabdb2 Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Fri, 1 Oct 2021 11:10:30 -0400 Subject: [PATCH 096/176] fix: add keep alive config option, add documentation around issue with eclipse/mosquitto version combined with this plugin, update test (#9803) --- plugins/outputs/mqtt/README.md | 7 +++++++ plugins/outputs/mqtt/mqtt.go | 21 +++++++++++++++++---- plugins/outputs/mqtt/mqtt_test.go | 1 + 3 files changed, 25 insertions(+), 4 deletions(-) diff --git a/plugins/outputs/mqtt/README.md b/plugins/outputs/mqtt/README.md index abb770f068d4f..f82d7597c5bea 100644 --- a/plugins/outputs/mqtt/README.md +++ b/plugins/outputs/mqtt/README.md @@ -40,6 +40,12 @@ This plugin writes to a [MQTT Broker](http://http://mqtt.org/) acting as a mqtt ## When true, messages will have RETAIN flag set. # retain = false + ## Defines the maximum length of time that the broker and client may not communicate. + ## Defaults to 0 which turns the feature off. 
For version v2.0.12 mosquitto there is a + ## [bug](https://github.com/eclipse/mosquitto/issues/2117) which requires keep_alive to be set. + ## As a reference eclipse/paho.mqtt.golang v1.3.0 defaults to 30. + # keep_alive = 0 + ## Data format to output. # data_format = "influx" ``` @@ -62,3 +68,4 @@ This plugin writes to a [MQTT Broker](http://http://mqtt.org/) acting as a mqtt * `batch`: When true, metrics will be sent in one MQTT message per flush. Otherwise, metrics are written one metric per MQTT message. * `retain`: Set `retain` flag when publishing * `data_format`: [About Telegraf data formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md) +* `keep_alive`: Defines the maximum length of time that the broker and client may not communicate with each other. Defaults to 0 which deactivates this feature. diff --git a/plugins/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go index 584a79ffd2ef1..54203ee0dba66 100644 --- a/plugins/outputs/mqtt/mqtt.go +++ b/plugins/outputs/mqtt/mqtt.go @@ -16,6 +16,10 @@ import ( "github.com/influxdata/telegraf/plugins/serializers" ) +const ( + defaultKeepAlive = 0 +) + var sampleConfig = ` servers = ["localhost:1883"] # required. @@ -55,6 +59,12 @@ var sampleConfig = ` ## actually reads it # retain = false + ## Defines the maximum length of time that the broker and client may not communicate. + ## Defaults to 0 which turns the feature off. For version v2.0.12 of eclipse/mosquitto there is a + ## [bug](https://github.com/eclipse/mosquitto/issues/2117) which requires keep_alive to be set. + ## As a reference eclipse/paho.mqtt.golang v1.3.0 defaults to 30. + # keep_alive = 0 + ## Data format to output. 
## Each data format has its own unique set of configuration options, read ## more about them here: @@ -72,8 +82,9 @@ type MQTT struct { QoS int `toml:"qos"` ClientID string `toml:"client_id"` tls.ClientConfig - BatchMessage bool `toml:"batch"` - Retain bool `toml:"retain"` + BatchMessage bool `toml:"batch"` + Retain bool `toml:"retain"` + KeepAlive int64 `toml:"keep_alive"` client paho.Client opts *paho.ClientOptions @@ -190,7 +201,7 @@ func (m *MQTT) publish(topic string, body []byte) error { func (m *MQTT) createOpts() (*paho.ClientOptions, error) { opts := paho.NewClientOptions() - opts.KeepAlive = 0 + opts.KeepAlive = m.KeepAlive if m.Timeout < config.Duration(time.Second) { m.Timeout = config.Duration(5 * time.Second) @@ -237,6 +248,8 @@ func (m *MQTT) createOpts() (*paho.ClientOptions, error) { func init() { outputs.Add("mqtt", func() telegraf.Output { - return &MQTT{} + return &MQTT{ + KeepAlive: defaultKeepAlive, + } }) } diff --git a/plugins/outputs/mqtt/mqtt_test.go b/plugins/outputs/mqtt/mqtt_test.go index 8affce1c93ddf..fd36d6d0577ac 100644 --- a/plugins/outputs/mqtt/mqtt_test.go +++ b/plugins/outputs/mqtt/mqtt_test.go @@ -19,6 +19,7 @@ func TestConnectAndWriteIntegration(t *testing.T) { m := &MQTT{ Servers: []string{url}, serializer: s, + KeepAlive: 30, } // Verify that we can connect to the MQTT broker From 49e50863901354fbc8c66e8f07920beb88bbd2ac Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Fri, 1 Oct 2021 15:09:50 -0600 Subject: [PATCH 097/176] fix: gitignore should ignore .toml/.conf files (#9818) As the application requires a config.toml or config.conf file it makes sense to ignore these types of files rather than having them show up in git status output. While the files are technically in the toml format, we use the .conf extension in our documentation so ignore both. 
--- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index 7c3fbd21c3535..614809d0681e1 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,5 @@ .DS_Store process.yml /.vscode +/*.toml +/*.conf From ac40bdc52e8ced5afc9605cea33b4fe32d998797 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Fri, 1 Oct 2021 15:10:25 -0600 Subject: [PATCH 098/176] fix: procstat missing tags in procstat_lookup metric (#9808) In #9488 the way that tags were built for procstat_lookup was changed and it was only including the pid_finder and result tags. This is not consistent with the documentation and is a regression from how they were previously constructed. Becuase of the large change to how procstat metrics are gathered, this will use one of the process metric's tags as a basis for the tags for procstat_lookup. Resolves: #9793 --- plugins/inputs/procstat/procstat.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index ce29a08460cca..7b2ffba26b430 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -154,9 +154,10 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error { } } + tags := make(map[string]string) p.procs = newProcs - for _, proc := range p.procs { + tags = proc.Tags() p.addMetric(proc, acc, now) } @@ -165,7 +166,7 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error { "running": len(p.procs), "result_code": 0, } - tags := make(map[string]string) + tags["pid_finder"] = p.PidFinder tags["result"] = "success" acc.AddFields("procstat_lookup", fields, tags, now) From 021dedb792cf2791a21d3bb80024dd67db7b875c Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Mon, 4 Oct 2021 10:05:56 -0400 Subject: [PATCH 099/176] fix: update toml tag to match sample config / readme (#9848) --- plugins/outputs/loki/loki.go | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/plugins/outputs/loki/loki.go b/plugins/outputs/loki/loki.go index 2f920ec829e3b..07d4d473bf396 100644 --- a/plugins/outputs/loki/loki.go +++ b/plugins/outputs/loki/loki.go @@ -57,7 +57,7 @@ type Loki struct { Timeout config.Duration `toml:"timeout"` Username string `toml:"username"` Password string `toml:"password"` - Headers map[string]string `toml:"headers"` + Headers map[string]string `toml:"http_headers"` ClientID string `toml:"client_id"` ClientSecret string `toml:"client_secret"` TokenURL string `toml:"token_url"` From 6c1bdfad76d8833f538f346a95ca5a5af88e9db9 Mon Sep 17 00:00:00 2001 From: "Guo Qiao (Joe)" Date: Tue, 5 Oct 2021 05:04:30 +1300 Subject: [PATCH 100/176] fix: logging in intel_rdt.go caused service stop timeout even as root (#9844) (#9850) --- plugins/inputs/intel_rdt/intel_rdt.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/intel_rdt/intel_rdt.go b/plugins/inputs/intel_rdt/intel_rdt.go index 89370062d730e..e0c7de526b067 100644 --- a/plugins/inputs/intel_rdt/intel_rdt.go +++ b/plugins/inputs/intel_rdt/intel_rdt.go @@ -278,12 +278,12 @@ func (r *IntelRDT) readData(ctx context.Context, args []string, processesPIDsAss }() err = cmd.Start() if err != nil { - r.errorChan <- fmt.Errorf("pqos: %v", err) + r.Log.Errorf("pqos: %v", err) return } err = cmd.Wait() if err != nil { - r.errorChan <- fmt.Errorf("pqos: %v", err) + r.Log.Errorf("pqos: %v", err) } } From c1f51b0645235e851f8c68e01b2e649dd7af5d22 Mon Sep 17 00:00:00 2001 From: Howard Yoo <32691630+howardyoo@users.noreply.github.com> Date: Mon, 4 Oct 2021 11:04:58 -0500 Subject: [PATCH 101/176] fix: mongodb input plugin issue #9845 (#9846) --- plugins/inputs/mongodb/mongostat.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index 41f735d389c7a..3871f6d252909 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ 
b/plugins/inputs/mongodb/mongostat.go @@ -1086,8 +1086,10 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec } if newStat.Metrics.Repl.Network != nil { returnVal.ReplNetworkBytes = newStat.Metrics.Repl.Network.Bytes - returnVal.ReplNetworkGetmoresNum = newStat.Metrics.Repl.Network.GetMores.Num - returnVal.ReplNetworkGetmoresTotalMillis = newStat.Metrics.Repl.Network.GetMores.TotalMillis + if newStat.Metrics.Repl.Network.GetMores != nil { + returnVal.ReplNetworkGetmoresNum = newStat.Metrics.Repl.Network.GetMores.Num + returnVal.ReplNetworkGetmoresTotalMillis = newStat.Metrics.Repl.Network.GetMores.TotalMillis + } returnVal.ReplNetworkOps = newStat.Metrics.Repl.Network.Ops } } From df5c19c17edac8cab07bbd5107f23603b1686a00 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Mon, 4 Oct 2021 11:19:06 -0700 Subject: [PATCH 102/176] feat (parser.json_v2): Support defining field/tag tables within an object table (#9449) --- config/config.go | 64 +++++--- go.mod | 2 +- go.sum | 4 +- plugins/parsers/json_v2/README.md | 80 ++++++--- plugins/parsers/json_v2/parser.go | 154 +++++++++++++----- plugins/parsers/json_v2/parser_test.go | 12 ++ .../multiple_arrays_in_object/expected.out | 1 - .../testdata/multiple_json_input/expected.out | 2 + .../testdata/multiple_json_input/input_1.json | 87 ++++++++++ .../testdata/multiple_json_input/input_2.json | 134 +++++++++++++++ .../multiple_json_input/telegraf.conf | 18 ++ .../subfieldtag_in_object/expected.out | 1 + .../testdata/subfieldtag_in_object/input.json | 97 +++++++++++ .../subfieldtag_in_object/telegraf.conf | 17 ++ .../subfieldtag_in_object_2/expected.out | 4 + .../subfieldtag_in_object_2/input.json | 10 ++ .../subfieldtag_in_object_2/telegraf.conf | 16 ++ 17 files changed, 617 insertions(+), 86 deletions(-) create mode 100644 plugins/parsers/json_v2/testdata/multiple_json_input/expected.out create mode 100644 
plugins/parsers/json_v2/testdata/multiple_json_input/input_1.json create mode 100644 plugins/parsers/json_v2/testdata/multiple_json_input/input_2.json create mode 100644 plugins/parsers/json_v2/testdata/multiple_json_input/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/subfieldtag_in_object/expected.out create mode 100644 plugins/parsers/json_v2/testdata/subfieldtag_in_object/input.json create mode 100644 plugins/parsers/json_v2/testdata/subfieldtag_in_object/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/expected.out create mode 100644 plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/input.json create mode 100644 plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/telegraf.conf diff --git a/config/config.go b/config/config.go index 4880da4832e5a..76aa494c4ca43 100644 --- a/config/config.go +++ b/config/config.go @@ -1421,28 +1421,8 @@ func (c *Config) getParserConfig(name string, tbl *ast.Table) (*parsers.Config, c.getFieldString(metricConfig, "timestamp_format", &mc.TimestampFormat) c.getFieldString(metricConfig, "timestamp_timezone", &mc.TimestampTimezone) - if fieldConfigs, ok := metricConfig.Fields["field"]; ok { - if fieldConfigs, ok := fieldConfigs.([]*ast.Table); ok { - for _, fieldconfig := range fieldConfigs { - var f json_v2.DataSet - c.getFieldString(fieldconfig, "path", &f.Path) - c.getFieldString(fieldconfig, "rename", &f.Rename) - c.getFieldString(fieldconfig, "type", &f.Type) - mc.Fields = append(mc.Fields, f) - } - } - } - if fieldConfigs, ok := metricConfig.Fields["tag"]; ok { - if fieldConfigs, ok := fieldConfigs.([]*ast.Table); ok { - for _, fieldconfig := range fieldConfigs { - var t json_v2.DataSet - c.getFieldString(fieldconfig, "path", &t.Path) - c.getFieldString(fieldconfig, "rename", &t.Rename) - t.Type = "string" - mc.Tags = append(mc.Tags, t) - } - } - } + mc.Fields = getFieldSubtable(c, metricConfig) + mc.Tags = getTagSubtable(c, metricConfig) if objectconfigs, 
ok := metricConfig.Fields["object"]; ok { if objectconfigs, ok := objectconfigs.([]*ast.Table); ok { @@ -1458,6 +1438,10 @@ func (c *Config) getParserConfig(name string, tbl *ast.Table) (*parsers.Config, c.getFieldStringSlice(objectConfig, "tags", &o.Tags) c.getFieldStringMap(objectConfig, "renames", &o.Renames) c.getFieldStringMap(objectConfig, "fields", &o.Fields) + + o.FieldPaths = getFieldSubtable(c, objectConfig) + o.TagPaths = getTagSubtable(c, objectConfig) + mc.JSONObjects = append(mc.JSONObjects, o) } } @@ -1477,6 +1461,42 @@ func (c *Config) getParserConfig(name string, tbl *ast.Table) (*parsers.Config, return pc, nil } +func getFieldSubtable(c *Config, metricConfig *ast.Table) []json_v2.DataSet { + var fields []json_v2.DataSet + + if fieldConfigs, ok := metricConfig.Fields["field"]; ok { + if fieldConfigs, ok := fieldConfigs.([]*ast.Table); ok { + for _, fieldconfig := range fieldConfigs { + var f json_v2.DataSet + c.getFieldString(fieldconfig, "path", &f.Path) + c.getFieldString(fieldconfig, "rename", &f.Rename) + c.getFieldString(fieldconfig, "type", &f.Type) + fields = append(fields, f) + } + } + } + + return fields +} + +func getTagSubtable(c *Config, metricConfig *ast.Table) []json_v2.DataSet { + var tags []json_v2.DataSet + + if fieldConfigs, ok := metricConfig.Fields["tag"]; ok { + if fieldConfigs, ok := fieldConfigs.([]*ast.Table); ok { + for _, fieldconfig := range fieldConfigs { + var t json_v2.DataSet + c.getFieldString(fieldconfig, "path", &t.Path) + c.getFieldString(fieldconfig, "rename", &t.Rename) + t.Type = "string" + tags = append(tags, t) + } + } + } + + return tags +} + // buildSerializer grabs the necessary entries from the ast.Table for creating // a serializers.Serializer object, and creates it, which can then be added onto // an Output object. 
diff --git a/go.mod b/go.mod index dc8b762d1e6d1..0999e764200ca 100644 --- a/go.mod +++ b/go.mod @@ -246,7 +246,7 @@ require ( github.com/stretchr/testify v1.7.0 github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 github.com/testcontainers/testcontainers-go v0.11.1 - github.com/tidwall/gjson v1.8.0 + github.com/tidwall/gjson v1.9.0 github.com/tidwall/match v1.0.3 // indirect github.com/tidwall/pretty v1.1.0 // indirect github.com/tinylib/msgp v1.1.6 diff --git a/go.sum b/go.sum index 4189b415723f0..6b60e06efb308 100644 --- a/go.sum +++ b/go.sum @@ -1535,8 +1535,8 @@ github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= github.com/testcontainers/testcontainers-go v0.11.1 h1:FiYsB83LSGbiawoV8TpAZGfcCUbtaeeg1SXqEKUxh08= github.com/testcontainers/testcontainers-go v0.11.1/go.mod h1:/V0UVq+1e7NWYoqTPog179clf0Qp9TOyp4EcXaEFQz8= -github.com/tidwall/gjson v1.8.0 h1:Qt+orfosKn0rbNTZqHYDqBrmm3UDA4KRkv70fDzG+PQ= -github.com/tidwall/gjson v1.8.0/go.mod h1:5/xDoumyyDNerp2U36lyolv46b3uF/9Bu6OfyQ9GImk= +github.com/tidwall/gjson v1.9.0 h1:+Od7AE26jAaMgVC31cQV/Ope5iKXulNMflrlB7k+F9E= +github.com/tidwall/gjson v1.9.0/go.mod h1:5/xDoumyyDNerp2U36lyolv46b3uF/9Bu6OfyQ9GImk= github.com/tidwall/match v1.0.3 h1:FQUVvBImDutD8wJLN6c5eMzWtjgONK9MwIBCOrUJKeE= github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= diff --git a/plugins/parsers/json_v2/README.md b/plugins/parsers/json_v2/README.md index a1effd5940614..d1e2e9c407255 100644 --- a/plugins/parsers/json_v2/README.md +++ b/plugins/parsers/json_v2/README.md @@ -1,10 +1,10 @@ # JSON Parser - Version 2 -This parser takes valid JSON input and turns it into metrics. 
The query syntax supported is [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md), you can go to this playground to test out your GJSON path here: https://gjson.dev/. You can find multiple examples under the `testdata` folder. +This parser takes valid JSON input and turns it into line protocol. The query syntax supported is [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md), you can go to this playground to test out your GJSON path here: https://gjson.dev/. You can find multiple examples under the `testdata` folder. ## Configuration -You configure this parser by describing the metric you want by defining the fields and tags from the input. The configuration is divided into config sub-tables called `field`, `tag`, and `object`. In the example below you can see all the possible configuration keys you can define for each config table. In the sections that follow these configuration keys are defined in more detail. +You configure this parser by describing the line protocol you want by defining the fields and tags from the input. The configuration is divided into config sub-tables called `field`, `tag`, and `object`. In the example below you can see all the possible configuration keys you can define for each config table. In the sections that follow these configuration keys are defined in more detail. 
**Example configuration:** @@ -19,27 +19,45 @@ You configure this parser by describing the metric you want by defining the fiel timestamp_format = "" # A string with a valid timestamp format (see below for possible values) timestamp_timezone = "" # A string with with a valid timezone (see below for possible values) [[inputs.file.json_v2.tag]] - path = "" # A string with valid GJSON path syntax + path = "" # A string with valid GJSON path syntax to a non-array/non-object value rename = "new name" # A string with a new name for the tag key [[inputs.file.json_v2.field]] - path = "" # A string with valid GJSON path syntax + path = "" # A string with valid GJSON path syntax to a non-array/non-object value rename = "new name" # A string with a new name for the tag key type = "int" # A string specifying the type (int,uint,float,string,bool) [[inputs.file.json_v2.object]] - path = "" # A string with valid GJSON path syntax + path = "" # A string with valid GJSON path syntax, can include array's and object's + + ## Configuration to define what JSON keys should be used as timestamps ## timestamp_key = "" # A JSON key (for a nested key, prepend the parent keys with underscores) to a valid timestamp timestamp_format = "" # A string with a valid timestamp format (see below for possible values) timestamp_timezone = "" # A string with with a valid timezone (see below for possible values) - disable_prepend_keys = false (or true, just not both) + + ### Configuration to define what JSON keys should be included and how (field/tag) ### + tags = [] # List of JSON keys (for a nested key, prepend the parent keys with underscores) to be a tag instead of a field, when adding a JSON key in this list you don't have to define it in the included_keys list included_keys = [] # List of JSON keys (for a nested key, prepend the parent keys with underscores) that should be only included in result excluded_keys = [] # List of JSON keys (for a nested key, prepend the parent keys with underscores) 
that shouldn't be included in result - tags = [] # List of JSON keys (for a nested key, prepend the parent keys with underscores) to be a tag instead of a field + # When a tag/field sub-table is defined, they will be the only field/tag's along with any keys defined in the included_keys list. + # If the resulting values aren't included in the object/array returned by the root object path, it won't be included. + # You can define as many tag/field sub-tables as you want. + [[inputs.file.json_v2.object.tag]] + path = "" # # A string with valid GJSON path syntax, can include array's and object's + rename = "new name" # A string with a new name for the tag key + [[inputs.file.json_v2.object.field]] + path = "" # # A string with valid GJSON path syntax, can include array's and object's + rename = "new name" # A string with a new name for the tag key + type = "int" # A string specifying the type (int,uint,float,string,bool) + + ### Configuration to modify the resutling line protocol ### + disable_prepend_keys = false (or true, just not both) [inputs.file.json_v2.object.renames] # A map of JSON keys (for a nested key, prepend the parent keys with underscores) with a new name for the tag key key = "new name" [inputs.file.json_v2.object.fields] # A map of JSON keys (for a nested key, prepend the parent keys with underscores) with a type (int,uint,float,string,bool) key = "int" ``` + --- + ### root config options * **measurement_name (OPTIONAL)**: Will set the measurement name to the provided string. @@ -56,7 +74,7 @@ such as `America/New_York`, to `Local` to utilize the system timezone, or to `UT ### `field` and `tag` config options -`field` and `tag` represent the elements of [line protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/), which is used to define a `metric`. You can use the `field` and `tag` config tables to gather a single value or an array of values that all share the same type and name. 
With this you can add a field or tag to a metric from data stored anywhere in your JSON. If you define the GJSON path to return a single value then you will get a single resutling metric that contains the field/tag. If you define the GJSON path to return an array of values, then each field/tag will be put into a separate metric (you use the # character to retrieve JSON arrays, find examples [here](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md#arrays)). +`field` and `tag` represent the elements of [line protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/). You can use the `field` and `tag` config tables to gather a single value or an array of values that all share the same type and name. With this you can add a field or tag to a line protocol from data stored anywhere in your JSON. If you define the GJSON path to return a single value then you will get a single resutling line protocol that contains the field/tag. If you define the GJSON path to return an array of values, then each field/tag will be put into a separate line protocol (you use the # character to retrieve JSON arrays, find examples [here](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md#arrays)). Note that objects are handled separately, therefore if you provide a path that returns a object it will be ignored. You will need use the `object` config table to parse objects, because `field` and `tag` doesn't handle relationships between data. Each `field` and `tag` you define is handled as a separate data point. @@ -70,26 +88,34 @@ The notable difference between `field` and `tag`, is that `tag` values will alwa #### **field** -* **path (REQUIRED)**: You must define the path query that gathers the object with [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md). +Using this field configuration you can gather a non-array/non-object values. 
Note this acts as a global field when used with the `object` configuration, if you gather an array of values using `object` then the field gathered will be added to each resulting line protocol without acknowledging its location in the original JSON. This is defined in TOML as an array table using double brackets. + +* **path (REQUIRED)**: A string with valid GJSON path syntax to a non-array/non-object value * **name (OPTIONAL)**: You can define a string value to set the field name. If not defined it will use the trailing word from the provided query. * **type (OPTIONAL)**: You can define a string value to set the desired type (float, int, uint, string, bool). If not defined it won't enforce a type and default to using the original type defined in the JSON (bool, float, or string). #### **tag** -* **path (REQUIRED)**: You must define the path query that gathers the object with [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md). +Using this tag configuration you can gather a non-array/non-object values. Note this acts as a global tag when used with the `object` configuration, if you gather an array of values using `object` then the tag gathered will be added to each resulting line protocol without acknowledging its location in the original JSON. This is defined in TOML as an array table using double brackets. + + +* **path (REQUIRED)**: A string with valid GJSON path syntax to a non-array/non-object value * **name (OPTIONAL)**: You can define a string value to set the field name. If not defined it will use the trailing word from the provided query. For good examples in using `field` and `tag` you can reference the following example configs: -* [fields_and_tags](testdata/fields_and_tags/telegraf.conf) --- + ### object -With the configuration section `object`, you can gather metrics from [JSON objects](https://www.w3schools.com/js/js_json_objects.asp). 
+With the configuration section `object`, you can gather values from [JSON objects](https://www.w3schools.com/js/js_json_objects.asp). This is defined in TOML as an array table using double brackets. -The following keys can be set for `object`: +#### The following keys can be set for `object` * **path (REQUIRED)**: You must define the path query that gathers the object with [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md) + +*Keys to define what JSON keys should be used as timestamps:* + * **timestamp_key(OPTIONAL)**: You can define a json key (for a nested key, prepend the parent keys with underscores) for the value to be set as the timestamp from the JSON input. * **timestamp_format (OPTIONAL, but REQUIRED when timestamp_query is defined**: Must be set to `unix`, `unix_ms`, `unix_us`, `unix_ns`, or the Go "reference time" which is defined to be the specific time: @@ -97,22 +123,30 @@ the Go "reference time" which is defined to be the specific time: * **timestamp_timezone (OPTIONAL, but REQUIRES timestamp_query**: This option should be set to a [Unix TZ value](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), such as `America/New_York`, to `Local` to utilize the system timezone, or to `UTC`. Defaults to `UTC` -* **disable_prepend_keys (OPTIONAL)**: Set to true to prevent resulting nested data to contain the parent key prepended to its key **NOTE**: duplicate names can overwrite each other when this is enabled -* **included_keys (OPTIONAL)**: You can define a list of key's that should be the only data included in the metric, by default it will include everything. 
-* **excluded_keys (OPTIONAL)**: You can define json keys to be excluded in the metric, for a nested key, prepend the parent keys with underscores + +*Configuration to define what JSON keys should be included and how (field/tag):* + +* **included_keys (OPTIONAL)**: You can define a list of key's that should be the only data included in the line protocol, by default it will include everything. +* **excluded_keys (OPTIONAL)**: You can define json keys to be excluded in the line protocol, for a nested key, prepend the parent keys with underscores * **tags (OPTIONAL)**: You can define json keys to be set as tags instead of fields, if you define a key that is an array or object then all nested values will become a tag -* **renames (OPTIONAL)**: A table matching the json key with the desired name (oppossed to defaulting to using the key), use names that include the prepended keys of its parent keys for nested results -* **fields (OPTIONAL)**: A table matching the json key with the desired type (int,string,bool,float), if you define a key that is an array or object then all nested values will become that type +* **field (OPTIONAL, defined in TOML as an array table using double brackets)**: Identical to the [field](#field) table you can define, but with two key differences. The path supports arrays and objects and is defined under the object table and therefore will adhere to how the JSON is structured. You want to use this if you want the field/tag to be added as it would if it were in the included_key list, but then use the GJSON path syntax. +* **tag (OPTIONAL, defined in TOML as an array table using double brackets)**: Identical to the [tag](#tag) table you can define, but with two key differences. The path supports arrays and objects and is defined under the object table and therefore will adhere to how the JSON is structured. You want to use this if you want the field/tag to be added as it would if it were in the included_key list, but then use the GJSON path syntax. 
+ +*Configuration to modify the resutling line protocol:* + +* **disable_prepend_keys (OPTIONAL)**: Set to true to prevent resulting nested data to contain the parent key prepended to its key **NOTE**: duplicate names can overwrite each other when this is enabled +* **renames (OPTIONAL, defined in TOML as a table using single bracket)**: A table matching the json key with the desired name (oppossed to defaulting to using the key), use names that include the prepended keys of its parent keys for nested results +* **fields (OPTIONAL, defined in TOML as a table using single bracket)**: A table matching the json key with the desired type (int,string,bool,float), if you define a key that is an array or object then all nested values will become that type ## Arrays and Objects The following describes the high-level approach when parsing arrays and objects: -**Array**: Every element in an array is treated as a *separate* metric +**Array**: Every element in an array is treated as a *separate* line protocol -**Object**: Every key/value in a object is treated as a *single* metric +**Object**: Every key/value in a object is treated as a *single* line protocol -When handling nested arrays and objects, these above rules continue to apply as the parser creates metrics. When an object has multiple array's as values, the array's will become separate metrics containing only non-array values from the obejct. Below you can see an example of this behavior, with an input json containing an array of book objects that has a nested array of characters. +When handling nested arrays and objects, these above rules continue to apply as the parser creates line protocol. When an object has multiple array's as values, the array's will become separate line protocol containing only non-array values from the obejct. Below you can see an example of this behavior, with an input json containing an array of book objects that has a nested array of characters. 
Example JSON: @@ -157,7 +191,7 @@ Example configuration: disable_prepend_keys = true ``` -Expected metrics: +Expected line protocol: ``` file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="A Long-expected Party" @@ -173,7 +207,7 @@ You can find more complicated examples under the folder `testdata`. ## Types -For each field you have the option to define the types for each metric. The following rules are in place for this configuration: +For each field you have the option to define the types. The following rules are in place for this configuration: * If a type is explicitly defined, the parser will enforce this type and convert the data to the defined type if possible. If the type can't be converted then the parser will fail. * If a type isn't defined, the parser will use the default type defined in the JSON (int, float, string) diff --git a/plugins/parsers/json_v2/parser.go b/plugins/parsers/json_v2/parser.go index fa0946621cde4..f4f84c562e781 100644 --- a/plugins/parsers/json_v2/parser.go +++ b/plugins/parsers/json_v2/parser.go @@ -13,6 +13,7 @@ import ( ) type Parser struct { + InputJSON []byte Configs []Config DefaultTags map[string]string Log telegraf.Logger @@ -20,8 +21,16 @@ type Parser struct { measurementName string - iterateObjects bool + iterateObjects bool + currentSettings JSONObject + pathResults []PathResult +} + +type PathResult struct { + result gjson.Result + tag bool + DataSet } type Config struct { @@ -53,21 +62,30 @@ type JSONObject struct { IncludedKeys []string `toml:"included_keys"` // OPTIONAL ExcludedKeys []string `toml:"excluded_keys"` // OPTIONAL DisablePrependKeys bool `toml:"disable_prepend_keys"` // OPTIONAL + FieldPaths []DataSet // OPTIONAL + TagPaths []DataSet // OPTIONAL } type MetricNode struct { + ParentIndex int OutputName string SetName string Tag bool DesiredType string // Can be "int", "uint", "float", "bool", "string" + /* + IncludeCollection is only used when processing objects and is responsible for containing 
the gjson results + found by the gjson paths provided in the FieldPaths and TagPaths configs. + */ + IncludeCollection *PathResult Metric telegraf.Metric gjson.Result } func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { + p.InputJSON = input // Only valid JSON is supported - if !gjson.Valid(string(input)) { + if !gjson.Valid(string(p.InputJSON)) { return nil, fmt.Errorf("Invalid JSON provided, unable to parse") } @@ -77,7 +95,7 @@ func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { // Measurement name configuration p.measurementName = c.MeasurementName if c.MeasurementNamePath != "" { - result := gjson.GetBytes(input, c.MeasurementNamePath) + result := gjson.GetBytes(p.InputJSON, c.MeasurementNamePath) if !result.IsArray() && !result.IsObject() { p.measurementName = result.String() } @@ -86,7 +104,7 @@ func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { // Timestamp configuration p.Timestamp = time.Now() if c.TimestampPath != "" { - result := gjson.GetBytes(input, c.TimestampPath) + result := gjson.GetBytes(p.InputJSON, c.TimestampPath) if !result.IsArray() && !result.IsObject() { if c.TimestampFormat == "" { err := fmt.Errorf("use of 'timestamp_query' requires 'timestamp_format'") @@ -101,17 +119,17 @@ func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { } } - fields, err := p.processMetric(c.Fields, input, false) + fields, err := p.processMetric(c.Fields, false) if err != nil { return nil, err } - tags, err := p.processMetric(c.Tags, input, true) + tags, err := p.processMetric(c.Tags, true) if err != nil { return nil, err } - objects, err := p.processObjects(c.JSONObjects, input) + objects, err := p.processObjects(c.JSONObjects) if err != nil { return nil, err } @@ -137,7 +155,7 @@ func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { // processMetric will iterate over all 'field' or 'tag' configs and create metrics for each // A field/tag can either be a single value or an array of values, each 
resulting in its own metric // For multiple configs, a set of metrics is created from the cartesian product of each separate config -func (p *Parser) processMetric(data []DataSet, input []byte, tag bool) ([]telegraf.Metric, error) { +func (p *Parser) processMetric(data []DataSet, tag bool) ([]telegraf.Metric, error) { if len(data) == 0 { return nil, nil } @@ -149,7 +167,7 @@ func (p *Parser) processMetric(data []DataSet, input []byte, tag bool) ([]telegr if c.Path == "" { return nil, fmt.Errorf("GJSON path is required") } - result := gjson.GetBytes(input, c.Path) + result := gjson.GetBytes(p.InputJSON, c.Path) if result.IsObject() { p.Log.Debugf("Found object in the path: %s, ignoring it please use 'object' to gather metrics from objects", c.Path) @@ -233,6 +251,9 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { p.Log.Debugf("Found object in query ignoring it please use 'object' to gather metrics from objects") return results, nil } + if result.IncludeCollection == nil && (len(p.currentSettings.FieldPaths) > 0 || len(p.currentSettings.TagPaths) > 0) { + result.IncludeCollection = p.existsInpathResults(result.Index, result.Raw) + } r, err := p.combineObject(result) if err != nil { return nil, err @@ -243,6 +264,9 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { if result.IsArray() { var err error + if result.IncludeCollection == nil && (len(p.currentSettings.FieldPaths) > 0 || len(p.currentSettings.TagPaths) > 0) { + result.IncludeCollection = p.existsInpathResults(result.Index, result.Raw) + } result.ForEach(func(_, val gjson.Result) bool { m := metric.New( p.measurementName, @@ -250,13 +274,14 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { map[string]interface{}{}, p.Timestamp, ) - if val.IsObject() { if p.iterateObjects { - n := MetricNode{ - SetName: result.SetName, - Metric: m, - Result: val, + n := result + n.ParentIndex += val.Index + n.Metric = m + n.Result = val + 
if n.IncludeCollection == nil && (len(p.currentSettings.FieldPaths) > 0 || len(p.currentSettings.TagPaths) > 0) { + n.IncludeCollection = p.existsInpathResults(n.Index, n.Raw) } r, err := p.combineObject(n) if err != nil { @@ -281,13 +306,12 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { for _, f := range result.Metric.TagList() { m.AddTag(f.Key, f.Value) } - n := MetricNode{ - Tag: result.Tag, - DesiredType: result.DesiredType, - OutputName: result.OutputName, - SetName: result.SetName, - Metric: m, - Result: val, + n := result + n.ParentIndex += val.Index + n.Metric = m + n.Result = val + if n.IncludeCollection == nil && (len(p.currentSettings.FieldPaths) > 0 || len(p.currentSettings.TagPaths) > 0) { + n.IncludeCollection = p.existsInpathResults(n.Index, n.Raw) } r, err := p.expandArray(n) if err != nil { @@ -314,17 +338,43 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { switch result.Value().(type) { case nil: // Ignore JSON values that are set as null default: + outputName := result.OutputName + desiredType := result.DesiredType + + if len(p.currentSettings.FieldPaths) > 0 || len(p.currentSettings.TagPaths) > 0 { + var pathResult *PathResult + // When IncludeCollection isn't nil, that means the current result is included in the collection. 
+ if result.IncludeCollection != nil { + pathResult = result.IncludeCollection + } else { + // Verify that the result should be included based on the results of fieldpaths and tag paths + pathResult = p.existsInpathResults(result.ParentIndex, result.Raw) + } + if pathResult == nil { + return results, nil + } + if pathResult.tag { + result.Tag = true + } + if !pathResult.tag { + desiredType = pathResult.Type + } + if pathResult.Rename != "" { + outputName = pathResult.Rename + } + } + if result.Tag { - result.DesiredType = "string" + desiredType = "string" } - v, err := p.convertType(result.Result, result.DesiredType, result.SetName) + v, err := p.convertType(result.Result, desiredType, result.SetName) if err != nil { return nil, err } if result.Tag { - result.Metric.AddTag(result.OutputName, v.(string)) + result.Metric.AddTag(outputName, v.(string)) } else { - result.Metric.AddField(result.OutputName, v) + result.Metric.AddField(outputName, v) } } } @@ -335,22 +385,55 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { return results, nil } +func (p *Parser) existsInpathResults(index int, raw string) *PathResult { + for _, f := range p.pathResults { + if f.result.Index == 0 { + for _, i := range f.result.Indexes { + if i == index { + return &f + } + } + } else if f.result.Index == index { + return &f + } + } + return nil +} + // processObjects will iterate over all 'object' configs and create metrics for each -func (p *Parser) processObjects(objects []JSONObject, input []byte) ([]telegraf.Metric, error) { +func (p *Parser) processObjects(objects []JSONObject) ([]telegraf.Metric, error) { p.iterateObjects = true var t []telegraf.Metric for _, c := range objects { p.currentSettings = c + if c.Path == "" { return nil, fmt.Errorf("GJSON path is required") } - result := gjson.GetBytes(input, c.Path) + result := gjson.GetBytes(p.InputJSON, c.Path) + + scopedJSON := []byte(result.Raw) + for _, f := range c.FieldPaths { + var r PathResult + 
r.result = gjson.GetBytes(scopedJSON, f.Path) + r.DataSet = f + p.pathResults = append(p.pathResults, r) + } + + for _, f := range c.TagPaths { + var r PathResult + r.result = gjson.GetBytes(scopedJSON, f.Path) + r.DataSet = f + r.tag = true + p.pathResults = append(p.pathResults, r) + } if result.Type == gjson.Null { return nil, fmt.Errorf("GJSON Path returned null") } rootObject := MetricNode{ + ParentIndex: 0, Metric: metric.New( p.measurementName, map[string]string{}, @@ -401,14 +484,11 @@ func (p *Parser) combineObject(result MetricNode) ([]telegraf.Metric, error) { } } - arrayNode := MetricNode{ - DesiredType: result.DesiredType, - Tag: result.Tag, - OutputName: outputName, - SetName: setName, - Metric: result.Metric, - Result: val, - } + arrayNode := result + arrayNode.ParentIndex += val.Index + arrayNode.OutputName = outputName + arrayNode.SetName = setName + arrayNode.Result = val for k, t := range p.currentSettings.Fields { if setName == k { @@ -455,8 +535,8 @@ func (p *Parser) isIncluded(key string, val gjson.Result) bool { return true } // automatically adds tags to included_keys so it does NOT have to be repeated in the config - p.currentSettings.IncludedKeys = append(p.currentSettings.IncludedKeys, p.currentSettings.Tags...) - for _, i := range p.currentSettings.IncludedKeys { + allKeys := append(p.currentSettings.IncludedKeys, p.currentSettings.Tags...) 
+ for _, i := range allKeys { if i == key { return true } diff --git a/plugins/parsers/json_v2/parser_test.go b/plugins/parsers/json_v2/parser_test.go index 7b34b83c0af8a..3ef08856190ac 100644 --- a/plugins/parsers/json_v2/parser_test.go +++ b/plugins/parsers/json_v2/parser_test.go @@ -28,6 +28,18 @@ func TestData(t *testing.T) { name: "Test having an array of objects", test: "array_of_objects", }, + { + name: "Test having multiple JSON inputs", + test: "multiple_json_input", + }, + { + name: "A second test when selecting with sub field and tags", + test: "subfieldtag_in_object_2", + }, + { + name: "Test selecting with sub field and tags", + test: "subfieldtag_in_object", + }, { name: "Test using just fields and tags", test: "fields_and_tags", diff --git a/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/expected.out b/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/expected.out index 2948da1720f64..04cd0635a5497 100644 --- a/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/expected.out +++ b/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/expected.out @@ -6,4 +6,3 @@ file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="The Shadow of th file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="The Shadow of the Past",name="Bilbo",species="hobbit",random=2 file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="The Shadow of the Past",name="Frodo",species="hobbit",random=1 file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="The Shadow of the Past",name="Frodo",species="hobbit",random=2 - diff --git a/plugins/parsers/json_v2/testdata/multiple_json_input/expected.out b/plugins/parsers/json_v2/testdata/multiple_json_input/expected.out new file mode 100644 index 0000000000000..f3fa9f0d8571c --- /dev/null +++ b/plugins/parsers/json_v2/testdata/multiple_json_input/expected.out @@ -0,0 +1,2 @@ +file,from_station=COLM,to_station=ANTC,etd_estimate_direction=North minutes=2i 
+file,from_station=POWL,to_station=DALY,etd_estimate_direction=South minutes=6i diff --git a/plugins/parsers/json_v2/testdata/multiple_json_input/input_1.json b/plugins/parsers/json_v2/testdata/multiple_json_input/input_1.json new file mode 100644 index 0000000000000..f60cd59f91247 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/multiple_json_input/input_1.json @@ -0,0 +1,87 @@ +{ + "?xml": { + "@version": "1.0", + "@encoding": "utf-8" + }, + "root": { + "@id": "1", + "uri": { + "#cdata-section": "http://api.bart.gov/api/etd.aspx?cmd=etd&orig=COLM&dir=n&json=y" + }, + "date": "07/02/2021", + "time": "06:05:47 PM PDT", + "station": [ + { + "name": "Colma", + "abbr": "COLM", + "etd": [ + { + "destination": "Antioch", + "abbreviation": "ANTC", + "limited": "0", + "estimate": [ + { + "minutes": "2", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "16", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "31", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + } + ] + }, + { + "destination": "Richmond", + "abbreviation": "RICH", + "limited": "0", + "estimate": [ + { + "minutes": "22", + "platform": "2", + "direction": "North", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "52", + "platform": "2", + "direction": "North", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + } + ] + } + ] + } + ], + "message": "" + } +} diff --git a/plugins/parsers/json_v2/testdata/multiple_json_input/input_2.json b/plugins/parsers/json_v2/testdata/multiple_json_input/input_2.json new file mode 100644 index 0000000000000..e75e84a093b37 --- /dev/null +++ 
b/plugins/parsers/json_v2/testdata/multiple_json_input/input_2.json @@ -0,0 +1,134 @@ +{ + "?xml": { + "@version": "1.0", + "@encoding": "utf-8" + }, + "root": { + "@id": "1", + "uri": { + "#cdata-section": "http://api.bart.gov/api/etd.aspx?cmd=etd&orig=POWL&dir=s&json=y" + }, + "date": "07/02/2021", + "time": "06:06:01 PM PDT", + "station": [ + { + "name": "Powell St.", + "abbr": "POWL", + "etd": [ + { + "destination": "Daly City", + "abbreviation": "DALY", + "limited": "0", + "estimate": [ + { + "minutes": "6", + "platform": "1", + "direction": "South", + "length": "10", + "color": "GREEN", + "hexcolor": "#339933", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "26", + "platform": "1", + "direction": "South", + "length": "9", + "color": "BLUE", + "hexcolor": "#0099cc", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "36", + "platform": "1", + "direction": "South", + "length": "10", + "color": "GREEN", + "hexcolor": "#339933", + "bikeflag": "1", + "delay": "0" + } + ] + }, + { + "destination": "Millbrae", + "abbreviation": "MLBR", + "limited": "0", + "estimate": [ + { + "minutes": "19", + "platform": "1", + "direction": "South", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "49", + "platform": "1", + "direction": "South", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "79", + "platform": "1", + "direction": "South", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + } + ] + }, + { + "destination": "SF Airport", + "abbreviation": "SFIA", + "limited": "0", + "estimate": [ + { + "minutes": "7", + "platform": "1", + "direction": "South", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "37", + "platform": "1", + "direction": "South", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + 
"bikeflag": "1", + "delay": "0" + }, + { + "minutes": "67", + "platform": "1", + "direction": "South", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + } + ] + } + ] + } + ], + "message": "" + } +} diff --git a/plugins/parsers/json_v2/testdata/multiple_json_input/telegraf.conf b/plugins/parsers/json_v2/testdata/multiple_json_input/telegraf.conf new file mode 100644 index 0000000000000..96c8ede181a54 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/multiple_json_input/telegraf.conf @@ -0,0 +1,18 @@ +[[inputs.file]] + files = ["./testdata/multiple_json_input/input_1.json", "./testdata/multiple_json_input/input_2.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "root.station" + [[inputs.file.json_v2.object.tag]] + path="#.abbr" + rename = "from_station" + [[inputs.file.json_v2.object.field]] + path = "#.etd.0.estimate.0.minutes" + rename = "minutes" + type = "int" + [[inputs.file.json_v2.object.tag]] + path = "#.etd.0.abbreviation" + rename = "to_station" + [[inputs.file.json_v2.object.tag]] + path = "#.etd.0.estimate.0.direction" diff --git a/plugins/parsers/json_v2/testdata/subfieldtag_in_object/expected.out b/plugins/parsers/json_v2/testdata/subfieldtag_in_object/expected.out new file mode 100644 index 0000000000000..a7db83863a63c --- /dev/null +++ b/plugins/parsers/json_v2/testdata/subfieldtag_in_object/expected.out @@ -0,0 +1 @@ +file,from_station=COLM,to_station=ANTC,etd_estimate_direction=North etd_estimate_minutes=6i diff --git a/plugins/parsers/json_v2/testdata/subfieldtag_in_object/input.json b/plugins/parsers/json_v2/testdata/subfieldtag_in_object/input.json new file mode 100644 index 0000000000000..45d0d5514ae76 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/subfieldtag_in_object/input.json @@ -0,0 +1,97 @@ +{ + "?xml": { + "@version": "1.0", + "@encoding": "utf-8" + }, + "root": { + "@id": "1", + "uri": { + "#cdata-section": 
"http://api.bart.gov/api/etd.aspx?cmd=etd&orig=COLM&dir=n&json=y" + }, + "date": "06/25/2021", + "time": "05:01:31 PM PDT", + "station": [ + { + "name": "Colma", + "abbr": "COLM", + "etd": [ + { + "destination": "Antioch", + "abbreviation": "ANTC", + "limited": "0", + "estimate": [ + { + "minutes": "6", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "36", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "51", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + } + ] + }, + { + "destination": "Richmond", + "abbreviation": "RICH", + "limited": "0", + "estimate": [ + { + "minutes": "12", + "platform": "2", + "direction": "North", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "26", + "platform": "2", + "direction": "North", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "41", + "platform": "2", + "direction": "North", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + } + ] + } + ] + } + ], + "message": "" + } +} diff --git a/plugins/parsers/json_v2/testdata/subfieldtag_in_object/telegraf.conf b/plugins/parsers/json_v2/testdata/subfieldtag_in_object/telegraf.conf new file mode 100644 index 0000000000000..7a8a283d77c3d --- /dev/null +++ b/plugins/parsers/json_v2/testdata/subfieldtag_in_object/telegraf.conf @@ -0,0 +1,17 @@ +[[inputs.file]] + files = ["./testdata/subfieldtag_in_object/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "root.station" + [[inputs.file.json_v2.object.field]] + path = 
"#.etd.0.estimate.0.minutes" + type = "int" + [[inputs.file.json_v2.object.tag]] + path = "#.abbr" + rename = "from_station" + [[inputs.file.json_v2.object.tag]] + path = "#.etd.0.abbreviation" + rename = "to_station" + [[inputs.file.json_v2.object.tag]] + path = "#.etd.0.estimate.0.direction" diff --git a/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/expected.out b/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/expected.out new file mode 100644 index 0000000000000..89748967a1ee9 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/expected.out @@ -0,0 +1,4 @@ +file,data=3 cnt=23i,format=0i +file,data=7 cnt=23i,format=0i +file,data=10 cnt=23i,format=0i +file,data=23 cnt=23i,format=0i diff --git a/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/input.json b/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/input.json new file mode 100644 index 0000000000000..62b768eae05a7 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/input.json @@ -0,0 +1,10 @@ +{ + "cnt": 23, + "data": [ + 3, + 7, + 10, + 23 + ], + "format": 0 +} diff --git a/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/telegraf.conf b/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/telegraf.conf new file mode 100644 index 0000000000000..60d7d18da43d0 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/telegraf.conf @@ -0,0 +1,16 @@ +# Example taken from: https://github.com/influxdata/telegraf/issues/5940 + +[[inputs.file]] + files = ["./testdata/subfieldtag_in_object_2/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "@this" + [[inputs.file.json_v2.object.tag]] + path = "data" + [[inputs.file.json_v2.object.field]] + path = "cnt" + type = "int" + [[inputs.file.json_v2.object.field]] + path = "format" + type = "int" From 68333d70f02d5ad89eac0dce290c1ad8b3917ffd Mon Sep 17 00:00:00 2001 From: Helen Weller 
<38860767+helenosheaa@users.noreply.github.com> Date: Mon, 4 Oct 2021 17:30:59 -0400 Subject: [PATCH 103/176] fix: remove eg fix: which breaks label bot functionality (#9859) --- .github/PULL_REQUEST_TEMPLATE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 1c717ddbb1a15..67b65a26247fb 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -5,7 +5,7 @@ show completion. --> - [ ] Updated associated README.md. - [ ] Wrote appropriate unit tests. -- [ ] Pull request title or commits are in [conventional commit format](https://www.conventionalcommits.org/en/v1.0.0/#summary) (e.g. feat: or fix:) +- [ ] Pull request title or commits are in [conventional commit format](https://www.conventionalcommits.org/en/v1.0.0/#summary) setting alias: {} name: {} id: {}'".format(alias, name, id)) + state["aliases"][id] = name + if "value" in metric.fields: + buildTopicTags(metric, topicFields) + buildNameTags(metric, name) + else: + output = None + + # Try to resolve the unresolved if any + if len(state["unresolved"]) > 0: + # Filter out the matching metrics and keep the rest as unresolved + log.debug(" unresolved") + unresolved = [("{}/{}".format(edgeid, m.fields["alias"]), m) for m in state["unresolved"]] + matching = [(mid, m) for mid, m in unresolved if mid == id] + state["unresolved"] = [m for mid, m in unresolved if mid != id] + + log.debug(" found {} matching unresolved metrics".format(len(matching))) + # Process the matching metrics and output - TODO - needs debugging + # for mid, m in matching: + # buildTopicTags(m,topicFields) + # buildNameTags(m) + # output = [m for _, m in matching] + [metric] + + elif DATA_TAG in topic: + log.debug(" metric msg_type: {} edgeid: {} topic: {}".format(DATA_TAG, edgeid, topic)) + if "alias" in metric.fields: + alias = metric.fields.get("alias") + + # Lookup the ID. 
If we know it, replace the name of the metric with the lookup value, + # otherwise we need to keep the metric for resolving later. + # This can happen if the messages are out-of-order for some reason... + id = "{}/{}".format(edgeid,alias) + if id in state["aliases"]: + name = state["aliases"][id] + log.debug(" found alias: {} name: {}".format(alias, name)) + buildTopicTags(metric,topicFields) + buildNameTags(metric,name) + else: + # We want to hold the metric until we get the corresponding birth message + log.debug(" id not found: {}".format(id)) + output = None + if len(state["unresolved"]) >= MAX_UNRESOLVED: + log.warn(" metric overflow, trimming {}".format(len(state["unresolved"]) - MAX_UNRESOLVED+1)) + # Release the unresolved metrics as raw and trim buffer + output = state["unresolved"][MAX_UNRESOLVED-1:] + state["unresolved"] = state["unresolved"][:MAX_UNRESOLVED-1] + log.debug(" --> keeping metric") + state["unresolved"].append(metric) + else: + output = None + + return output + From 014161cd0c2c75ae96c189f8227e9a94c6abc358 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Tue, 5 Oct 2021 15:06:53 -0600 Subject: [PATCH 105/176] feat: add custom time/date format field for elasticsearch_query (#9838) --- plugins/inputs/elasticsearch_query/README.md | 8 ++++++++ .../elasticsearch_query/aggregation_query.go | 2 +- .../elasticsearch_query/elasticsearch_query.go | 8 ++++++++ .../elasticsearch_query_test.go | 17 +++++++++++++++++ 4 files changed, 34 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/elasticsearch_query/README.md b/plugins/inputs/elasticsearch_query/README.md index 881cb6609b5b0..5e90d19e72f21 100755 --- a/plugins/inputs/elasticsearch_query/README.md +++ b/plugins/inputs/elasticsearch_query/README.md @@ -54,6 +54,13 @@ Currently it is known to break on 7.x or greater versions. ## The date/time field in the Elasticsearch index (mandatory). 
date_field = "@timestamp" + ## If the field used for the date/time field in Elasticsearch is also using + ## a custom date/time format it may be required to provide the format to + ## correctly parse the field. + ## + ## If using one of the built in elasticsearch formats this is not required. + # date_field_custom_format = "" + ## Time window to query (eg. "1m" to query documents from last minute). ## Normally should be set to same as collection interval query_period = "1m" @@ -150,6 +157,7 @@ Please note that the `[[inputs.elasticsearch_query]]` is still required for all ### Optional parameters +- `date_field_custom_format`: Not needed if using one of the built in date/time formats of Elasticsearch, but may be required if using a custom date/time format. The format syntax uses the [Joda date format](https://www.elastic.co/guide/en/elasticsearch/reference/6.8/search-aggregations-bucket-daterange-aggregation.html#date-format-pattern). - `filter_query`: Lucene query to filter the results (default: "\*") - `metric_fields`: The list of fields to perform metric aggregation (these must be indexed as numeric fields) - `metric_funcion`: The single-value metric aggregation function to be performed on the `metric_fields` defined. Currently supported aggregations are "avg", "min", "max", "sum". 
(see [https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics.html](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics.html) diff --git a/plugins/inputs/elasticsearch_query/aggregation_query.go b/plugins/inputs/elasticsearch_query/aggregation_query.go index 51bdd98e7130b..aff67d2baa884 100644 --- a/plugins/inputs/elasticsearch_query/aggregation_query.go +++ b/plugins/inputs/elasticsearch_query/aggregation_query.go @@ -33,7 +33,7 @@ func (e *ElasticsearchQuery) runAggregationQuery(ctx context.Context, aggregatio query := elastic5.NewBoolQuery() query = query.Filter(elastic5.NewQueryStringQuery(filterQuery)) - query = query.Filter(elastic5.NewRangeQuery(aggregation.DateField).From(from).To(now)) + query = query.Filter(elastic5.NewRangeQuery(aggregation.DateField).From(from).To(now).Format(aggregation.DateFieldFormat)) src, err := query.Source() if err != nil { diff --git a/plugins/inputs/elasticsearch_query/elasticsearch_query.go b/plugins/inputs/elasticsearch_query/elasticsearch_query.go index 3c04f952b5bee..009577573a4f3 100644 --- a/plugins/inputs/elasticsearch_query/elasticsearch_query.go +++ b/plugins/inputs/elasticsearch_query/elasticsearch_query.go @@ -55,6 +55,13 @@ const sampleConfig = ` ## The date/time field in the Elasticsearch index (mandatory). date_field = "@timestamp" + ## If the field used for the date/time field in Elasticsearch is also using + ## a custom date/time format it may be required to provide the format to + ## correctly parse the field. + ## + ## If using one of the built in elasticsearch formats this is not required. + # date_field_custom_format = "" + ## Time window to query (eg. "1m" to query documents from last minute). 
## Normally should be set to same as collection interval query_period = "1m" @@ -104,6 +111,7 @@ type esAggregation struct { Index string `toml:"index"` MeasurementName string `toml:"measurement_name"` DateField string `toml:"date_field"` + DateFieldFormat string `toml:"date_field_custom_format"` QueryPeriod config.Duration `toml:"query_period"` FilterQuery string `toml:"filter_query"` MetricFields []string `toml:"metric_fields"` diff --git a/plugins/inputs/elasticsearch_query/elasticsearch_query_test.go b/plugins/inputs/elasticsearch_query/elasticsearch_query_test.go index 6a89dc8eea617..e017681b7c58d 100644 --- a/plugins/inputs/elasticsearch_query/elasticsearch_query_test.go +++ b/plugins/inputs/elasticsearch_query/elasticsearch_query_test.go @@ -484,6 +484,23 @@ var testEsAggregationData = []esAggregationQueryTest{ false, false, }, + { + "query 14 - non-existing custom date/time format", + esAggregation{ + Index: testindex, + MeasurementName: "measurement14", + DateField: "@timestamp", + DateFieldFormat: "yyyy", + QueryPeriod: queryPeriod, + Tags: []string{}, + mapMetricFields: map[string]string{}, + }, + nil, + nil, + false, + false, + true, + }, } func setupIntegrationTest() error { From d2a25456d58cb269fd57e0204a52b32b309c5d4a Mon Sep 17 00:00:00 2001 From: Yuji Kawamoto Date: Wed, 6 Oct 2021 06:11:46 +0900 Subject: [PATCH 106/176] feat(prometheus): add ignore_timestamp option (#9740) --- config/config.go | 2 +- etc/telegraf.conf | 4 ++ plugins/inputs/prometheus/README.md | 4 ++ plugins/inputs/prometheus/parser.go | 4 +- plugins/inputs/prometheus/parser_test.go | 45 ++++++++++++++++++-- plugins/inputs/prometheus/prometheus.go | 13 +++++- plugins/inputs/prometheus/prometheus_test.go | 23 ++++++++++ plugins/parsers/prometheus/parser.go | 21 +++++---- plugins/parsers/prometheus/parser_test.go | 28 +++++++++++- plugins/parsers/registry.go | 13 ++++-- 10 files changed, 133 insertions(+), 24 deletions(-) diff --git a/config/config.go b/config/config.go index 
76aa494c4ca43..d6081aedcfaf3 100644 --- a/config/config.go +++ b/config/config.go @@ -1593,7 +1593,7 @@ func (c *Config) missingTomlField(_ reflect.Type, key string) error { "json_string_fields", "json_time_format", "json_time_key", "json_timestamp_format", "json_timestamp_units", "json_timezone", "json_v2", "lvm", "metric_batch_size", "metric_buffer_limit", "name_override", "name_prefix", "name_suffix", "namedrop", "namepass", "order", "pass", "period", "precision", - "prefix", "prometheus_export_timestamp", "prometheus_sort_metrics", "prometheus_string_as_label", + "prefix", "prometheus_export_timestamp", "prometheus_ignore_timestamp", "prometheus_sort_metrics", "prometheus_string_as_label", "separator", "splunkmetric_hec_routing", "splunkmetric_multimetric", "tag_keys", "tagdrop", "tagexclude", "taginclude", "tagpass", "tags", "template", "templates", "value_field_name", "wavefront_source_override", "wavefront_use_strict", diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 2892d99142be5..12672da45cf7d 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -8305,6 +8305,10 @@ # ## Url tag name (tag containing scrapped url. optional, default is "url") # # url_tag = "url" # +# ## Whether the timestamp of the scraped metrics will be ignored. +# ## If set to true, the gather time will be used. +# # ignore_timestamp = false +# # ## An array of Kubernetes services to scrape metrics from. # # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] # diff --git a/plugins/inputs/prometheus/README.md b/plugins/inputs/prometheus/README.md index 955c6ab7d978b..fe6d3a8e816da 100644 --- a/plugins/inputs/prometheus/README.md +++ b/plugins/inputs/prometheus/README.md @@ -23,6 +23,10 @@ in Prometheus format. ## Url tag name (tag containing scrapped url. optional, default is "url") # url_tag = "url" + ## Whether the timestamp of the scraped metrics will be ignored. + ## If set to true, the gather time will be used. 
+ # ignore_timestamp = false + ## An array of Kubernetes services to scrape metrics from. # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] diff --git a/plugins/inputs/prometheus/parser.go b/plugins/inputs/prometheus/parser.go index 7d3140dc7d627..dfe5cc4749813 100644 --- a/plugins/inputs/prometheus/parser.go +++ b/plugins/inputs/prometheus/parser.go @@ -19,7 +19,7 @@ import ( "github.com/prometheus/common/expfmt" ) -func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) { +func Parse(buf []byte, header http.Header, ignoreTimestamp bool) ([]telegraf.Metric, error) { var parser expfmt.TextParser var metrics []telegraf.Metric var err error @@ -76,7 +76,7 @@ func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) { // converting to telegraf metric if len(fields) > 0 { var t time.Time - if m.TimestampMs != nil && *m.TimestampMs > 0 { + if !ignoreTimestamp && m.TimestampMs != nil && *m.TimestampMs > 0 { t = time.Unix(0, *m.TimestampMs*1000000) } else { t = now diff --git a/plugins/inputs/prometheus/parser_test.go b/plugins/inputs/prometheus/parser_test.go index 293e1968d2b5d..ffd5967458c9f 100644 --- a/plugins/inputs/prometheus/parser_test.go +++ b/plugins/inputs/prometheus/parser_test.go @@ -1,8 +1,10 @@ package prometheus import ( + "fmt" "net/http" "testing" + "time" "github.com/stretchr/testify/assert" ) @@ -42,7 +44,7 @@ apiserver_request_latencies_count{resource="bindings",verb="POST"} 2025 func TestParseValidPrometheus(t *testing.T) { // Gauge value - metrics, err := Parse([]byte(validUniqueGauge), http.Header{}) + metrics, err := Parse([]byte(validUniqueGauge), http.Header{}, false) assert.NoError(t, err) assert.Len(t, metrics, 1) assert.Equal(t, "cadvisor_version_info", metrics[0].Name()) @@ -58,7 +60,7 @@ func TestParseValidPrometheus(t *testing.T) { }, metrics[0].Tags()) // Counter value - metrics, err = Parse([]byte(validUniqueCounter), http.Header{}) + metrics, err = Parse([]byte(validUniqueCounter), 
http.Header{}, false) assert.NoError(t, err) assert.Len(t, metrics, 1) assert.Equal(t, "get_token_fail_count", metrics[0].Name()) @@ -69,7 +71,7 @@ func TestParseValidPrometheus(t *testing.T) { // Summary data //SetDefaultTags(map[string]string{}) - metrics, err = Parse([]byte(validUniqueSummary), http.Header{}) + metrics, err = Parse([]byte(validUniqueSummary), http.Header{}, false) assert.NoError(t, err) assert.Len(t, metrics, 1) assert.Equal(t, "http_request_duration_microseconds", metrics[0].Name()) @@ -83,7 +85,7 @@ func TestParseValidPrometheus(t *testing.T) { assert.Equal(t, map[string]string{"handler": "prometheus"}, metrics[0].Tags()) // histogram data - metrics, err = Parse([]byte(validUniqueHistogram), http.Header{}) + metrics, err = Parse([]byte(validUniqueHistogram), http.Header{}, false) assert.NoError(t, err) assert.Len(t, metrics, 1) assert.Equal(t, "apiserver_request_latencies", metrics[0].Name()) @@ -103,3 +105,38 @@ func TestParseValidPrometheus(t *testing.T) { map[string]string{"verb": "POST", "resource": "bindings"}, metrics[0].Tags()) } + +func TestMetricsWithTimestamp(t *testing.T) { + testTime := time.Date(2020, time.October, 4, 17, 0, 0, 0, time.UTC) + testTimeUnix := testTime.UnixNano() / int64(time.Millisecond) + metricsWithTimestamps := fmt.Sprintf(` +# TYPE test_counter counter +test_counter{label="test"} 1 %d +`, testTimeUnix) + + // IgnoreTimestamp is false + metrics, err := Parse([]byte(metricsWithTimestamps), http.Header{}, false) + assert.NoError(t, err) + assert.Len(t, metrics, 1) + assert.Equal(t, "test_counter", metrics[0].Name()) + assert.Equal(t, map[string]interface{}{ + "counter": float64(1), + }, metrics[0].Fields()) + assert.Equal(t, map[string]string{ + "label": "test", + }, metrics[0].Tags()) + assert.Equal(t, testTime, metrics[0].Time().UTC()) + + // IgnoreTimestamp is true + metrics, err = Parse([]byte(metricsWithTimestamps), http.Header{}, true) + assert.NoError(t, err) + assert.Len(t, metrics, 1) + assert.Equal(t, 
"test_counter", metrics[0].Name()) + assert.Equal(t, map[string]interface{}{ + "counter": float64(1), + }, metrics[0].Fields()) + assert.Equal(t, map[string]string{ + "label": "test", + }, metrics[0].Tags()) + assert.WithinDuration(t, time.Now(), metrics[0].Time().UTC(), 5*time.Second) +} diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 136e8ae0f6d9d..18cbf6c8b3d59 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -58,6 +58,8 @@ type Prometheus struct { URLTag string `toml:"url_tag"` + IgnoreTimestamp bool `toml:"ignore_timestamp"` + tls.ClientConfig Log telegraf.Logger @@ -101,6 +103,10 @@ var sampleConfig = ` ## Url tag name (tag containing scrapped url. optional, default is "url") # url_tag = "url" + ## Whether the timestamp of the scraped metrics will be ignored. + ## If set to true, the gather time will be used. + # ignore_timestamp = false + ## An array of Kubernetes services to scrape metrics from. 
# kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] @@ -414,10 +420,13 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error } if p.MetricVersion == 2 { - parser := parser_v2.Parser{Header: resp.Header} + parser := parser_v2.Parser{ + Header: resp.Header, + IgnoreTimestamp: p.IgnoreTimestamp, + } metrics, err = parser.Parse(body) } else { - metrics, err = Parse(body, resp.Header) + metrics, err = Parse(body, resp.Header, p.IgnoreTimestamp) } if err != nil { diff --git a/plugins/inputs/prometheus/prometheus_test.go b/plugins/inputs/prometheus/prometheus_test.go index ea8ca0e9346ab..11117e05b45d9 100644 --- a/plugins/inputs/prometheus/prometheus_test.go +++ b/plugins/inputs/prometheus/prometheus_test.go @@ -242,6 +242,29 @@ func TestPrometheusGeneratesGaugeMetricsV2(t *testing.T) { assert.True(t, acc.HasTimestamp("prometheus", time.Unix(1490802350, 0))) } +func TestPrometheusGeneratesMetricsWithIgnoreTimestamp(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, err := fmt.Fprintln(w, sampleTextFormat) + require.NoError(t, err) + })) + defer ts.Close() + + p := &Prometheus{ + Log: testutil.Logger{}, + URLs: []string{ts.URL}, + URLTag: "url", + IgnoreTimestamp: true, + } + + var acc testutil.Accumulator + + err := acc.GatherError(p.Gather) + require.NoError(t, err) + + m, _ := acc.Get("test_metric") + assert.WithinDuration(t, time.Now(), m.Time, 5*time.Second) +} + func TestUnsupportedFieldSelector(t *testing.T) { fieldSelectorString := "spec.containerName=container" prom := &Prometheus{Log: testutil.Logger{}, KubernetesFieldSelector: fieldSelectorString} diff --git a/plugins/parsers/prometheus/parser.go b/plugins/parsers/prometheus/parser.go index e55789f7957b4..bc7ea0c636e4d 100644 --- a/plugins/parsers/prometheus/parser.go +++ b/plugins/parsers/prometheus/parser.go @@ -21,8 +21,9 @@ import ( ) type Parser struct { - DefaultTags map[string]string - Header 
http.Header + DefaultTags map[string]string + Header http.Header + IgnoreTimestamp bool } func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { @@ -65,14 +66,15 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { for _, m := range mf.Metric { // reading tags tags := common.MakeLabels(m, p.DefaultTags) + t := p.GetTimestamp(m, now) if mf.GetType() == dto.MetricType_SUMMARY { // summary metric - telegrafMetrics := makeQuantiles(m, tags, metricName, mf.GetType(), now) + telegrafMetrics := makeQuantiles(m, tags, metricName, mf.GetType(), t) metrics = append(metrics, telegrafMetrics...) } else if mf.GetType() == dto.MetricType_HISTOGRAM { // histogram metric - telegrafMetrics := makeBuckets(m, tags, metricName, mf.GetType(), now) + telegrafMetrics := makeBuckets(m, tags, metricName, mf.GetType(), t) metrics = append(metrics, telegrafMetrics...) } else { // standard metric @@ -80,7 +82,6 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { fields := getNameAndValue(m, metricName) // converting to telegraf metric if len(fields) > 0 { - t := getTimestamp(m, now) m := metric.New("prometheus", tags, fields, t, common.ValueType(mf.GetType())) metrics = append(metrics, m) } @@ -113,10 +114,9 @@ func (p *Parser) SetDefaultTags(tags map[string]string) { } // Get Quantiles for summary metric & Buckets for histogram -func makeQuantiles(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, now time.Time) []telegraf.Metric { +func makeQuantiles(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, t time.Time) []telegraf.Metric { var metrics []telegraf.Metric fields := make(map[string]interface{}) - t := getTimestamp(m, now) fields[metricName+"_count"] = float64(m.GetSummary().GetSampleCount()) fields[metricName+"_sum"] = float64(m.GetSummary().GetSampleSum()) @@ -137,10 +137,9 @@ func makeQuantiles(m *dto.Metric, tags map[string]string, metricName string, met } // Get Buckets from 
histogram metric -func makeBuckets(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, now time.Time) []telegraf.Metric { +func makeBuckets(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, t time.Time) []telegraf.Metric { var metrics []telegraf.Metric fields := make(map[string]interface{}) - t := getTimestamp(m, now) fields[metricName+"_count"] = float64(m.GetHistogram().GetSampleCount()) fields[metricName+"_sum"] = float64(m.GetHistogram().GetSampleSum()) @@ -179,9 +178,9 @@ func getNameAndValue(m *dto.Metric, metricName string) map[string]interface{} { return fields } -func getTimestamp(m *dto.Metric, now time.Time) time.Time { +func (p *Parser) GetTimestamp(m *dto.Metric, now time.Time) time.Time { var t time.Time - if m.TimestampMs != nil && *m.TimestampMs > 0 { + if !p.IgnoreTimestamp && m.TimestampMs != nil && *m.TimestampMs > 0 { t = time.Unix(0, m.GetTimestampMs()*1000000) } else { t = now diff --git a/plugins/parsers/prometheus/parser_test.go b/plugins/parsers/prometheus/parser_test.go index a403887e093b9..52ef2f5a3bed3 100644 --- a/plugins/parsers/prometheus/parser_test.go +++ b/plugins/parsers/prometheus/parser_test.go @@ -74,7 +74,7 @@ func TestParsingValidGauge(t *testing.T) { testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics()) } -func TestParsingValieCounter(t *testing.T) { +func TestParsingValidCounter(t *testing.T) { expected := []telegraf.Metric{ testutil.MustMetric( "prometheus", @@ -340,6 +340,32 @@ test_counter{label="test"} 1 %d testutil.RequireMetricsEqual(t, expected, metrics, testutil.SortMetrics()) } +func TestMetricsWithoutIgnoreTimestamp(t *testing.T) { + testTime := time.Date(2020, time.October, 4, 17, 0, 0, 0, time.UTC) + testTimeUnix := testTime.UnixNano() / int64(time.Millisecond) + metricsWithTimestamps := fmt.Sprintf(` +# TYPE test_counter counter +test_counter{label="test"} 1 %d +`, testTimeUnix) + expected := 
testutil.MustMetric( + "prometheus", + map[string]string{ + "label": "test", + }, + map[string]interface{}{ + "test_counter": float64(1.0), + }, + testTime, + telegraf.Counter, + ) + + parser := Parser{IgnoreTimestamp: true} + metric, _ := parser.ParseLine(metricsWithTimestamps) + + testutil.RequireMetricEqual(t, expected, metric, testutil.IgnoreTime(), testutil.SortMetrics()) + assert.WithinDuration(t, time.Now(), metric.Time(), 5*time.Second) +} + func parse(buf []byte) ([]telegraf.Metric, error) { parser := Parser{} return parser.Parse(buf) diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index f07c789a272f1..fcdfc473ae37a 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -156,6 +156,9 @@ type Config struct { // FormData configuration FormUrlencodedTagKeys []string `toml:"form_urlencoded_tag_keys"` + // Prometheus configuration + PrometheusIgnoreTimestamp bool `toml:"prometheus_ignore_timestamp"` + // Value configuration ValueFieldName string `toml:"value_field_name"` @@ -259,7 +262,10 @@ func NewParser(config *Config) (Parser, error) { config.FormUrlencodedTagKeys, ) case "prometheus": - parser, err = NewPrometheusParser(config.DefaultTags) + parser, err = NewPrometheusParser( + config.DefaultTags, + config.PrometheusIgnoreTimestamp, + ) case "prometheusremotewrite": parser, err = NewPrometheusRemoteWriteParser(config.DefaultTags) case "xml", "xpath_json", "xpath_msgpack", "xpath_protobuf": @@ -378,9 +384,10 @@ func NewFormUrlencodedParser( }, nil } -func NewPrometheusParser(defaultTags map[string]string) (Parser, error) { +func NewPrometheusParser(defaultTags map[string]string, ignoreTimestamp bool) (Parser, error) { return &prometheus.Parser{ - DefaultTags: defaultTags, + DefaultTags: defaultTags, + IgnoreTimestamp: ignoreTimestamp, }, nil } From e0c45e4a769f607e2b5028b6b65905924a58bf18 Mon Sep 17 00:00:00 2001 From: Minni Walia Date: Tue, 5 Oct 2021 21:51:45 +0000 Subject: [PATCH 107/176] docs: updated azure 
data explorer plugin documentation (#9816) --- plugins/outputs/azure_data_explorer/README.md | 112 +++++++++++------- 1 file changed, 66 insertions(+), 46 deletions(-) diff --git a/plugins/outputs/azure_data_explorer/README.md b/plugins/outputs/azure_data_explorer/README.md index bb6d0d039b0d2..4ae5bf7139924 100644 --- a/plugins/outputs/azure_data_explorer/README.md +++ b/plugins/outputs/azure_data_explorer/README.md @@ -1,10 +1,11 @@ # Azure Data Explorer output plugin -This plugin writes metrics collected by any of the input plugins of Telegraf to [Azure Data Explorer](https://azure.microsoft.com/en-au/services/data-explorer/). +This plugin writes data collected by any of the Telegraf input plugins to [Azure Data Explorer](https://azure.microsoft.com/en-au/services/data-explorer/). +Azure Data Explorer is a distributed, columnar store, purpose built for any type of logs, metrics and time series data. ## Pre-requisites: - [Create Azure Data Explorer cluster and database](https://docs.microsoft.com/en-us/azure/data-explorer/create-cluster-database-portal) -- VM/compute or container to host Telegraf - it could be hosted locally where an app/services to be monitored are deployed or remotely on a dedicated monitoring compute/container. +- VM/compute or container to host Telegraf - it could be hosted locally where an app/service to be monitored is deployed or remotely on a dedicated monitoring compute/container. ## Configuration: @@ -21,7 +22,7 @@ This plugin writes metrics collected by any of the input plugins of Telegraf to # database = "" ## Timeout for Azure Data Explorer operations - # timeout = "15s" + # timeout = "20s" ## Type of metrics grouping used when pushing to Azure Data Explorer. ## Default is "TablePerMetric" for one table per different metric. @@ -30,9 +31,6 @@ This plugin writes metrics collected by any of the input plugins of Telegraf to ## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable"). 
# table_name = "" - - # timeout = "20s" - ``` ## Metrics Grouping @@ -48,12 +46,12 @@ The table name will match the `name` property of the metric, this means that the ### SingleTable -The plugin will send all the metrics received to a single Azure Data Explorer table. The name of the table must be supplied via `table_name` the config file. If the table doesn't exist the plugin will create the table, if the table exists then the plugin will try to merge the Telegraf metric schema to the existing table. For more information about the merge process check the [`.create-merge` documentation](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/management/create-merge-table-command). +The plugin will send all the metrics received to a single Azure Data Explorer table. The name of the table must be supplied via `table_name` in the config file. If the table doesn't exist the plugin will create the table, if the table exists then the plugin will try to merge the Telegraf metric schema to the existing table. For more information about the merge process check the [`.create-merge` documentation](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/management/create-merge-table-command). ## Tables Schema -The schema of the Azure Data Explorer table will match the structure of the Telegraf `Metric` object. The corresponding Azure Data Explorer command would be like the following: +The schema of the Azure Data Explorer table will match the structure of the Telegraf `Metric` object. 
The corresponding Azure Data Explorer command generated by the plugin would be like the following: ``` .create-merge table ['table-name'] (['fields']:dynamic, ['name']:string, ['tags']:dynamic, ['timestamp']:datetime) ``` @@ -63,7 +61,7 @@ The corresponding table mapping would be like the following: .create-or-alter table ['table-name'] ingestion json mapping 'table-name_mapping' '[{"column":"fields", "Properties":{"Path":"$[\'fields\']"}},{"column":"name", "Properties":{"Path":"$[\'name\']"}},{"column":"tags", "Properties":{"Path":"$[\'tags\']"}},{"column":"timestamp", "Properties":{"Path":"$[\'timestamp\']"}}]' ``` -**Note**: This plugin will automatically create Azure Data Explorer tables and corresponding table mapping as per the above mentioned commands. Since the `Metric` object is a complex type, the only output format supported is JSON. +**Note**: This plugin will automatically create Azure Data Explorer tables and corresponding table mapping as per the above mentioned commands. ## Authentiation @@ -95,7 +93,7 @@ The plugin will authenticate using the first available of the following configurations, **it's important to understand that the assessment, and consequently choosing the authentication method, will happen in order as below**: 1. **Client Credentials**: Azure AD Application ID and Secret. - + Set the following environment variables: - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate. 
@@ -126,50 +124,72 @@ following configurations, **it's important to understand that the assessment, an [arm]: https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-overview -## Querying collected metrics data in Azure Data Explorer -With all above configurations, you will have data stored in following standard format for each metric type stored as an Azure Data Explorer table - -ColumnName | ColumnType ----------- | ---------- -fields | dynamic -name | string -tags | dynamic -timestamp | datetime - -As "fields" and "tags" are of dynamic data type so following multiple ways to query this data - -1. **Query JSON attributes directly**: This is one of the coolest feature of Azure Data Explorer so you can run query like this - - ``` - Tablename - | where fields.size_kb == 9120 - ``` -2. **Use [Update policy](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/management/updatepolicy)**: to transform data, in this case, to flatten dynamic data type columns. This is the recommended performant way for querying over large data volumes compared to querying directly over JSON attributes. +## Querying data collected in Azure Data Explorer +Examples of data transformations and queries that would be useful to gain insights - +1. 
**Data collected using SQL input plugin** + + Sample SQL metrics data - + + name | tags | timestamp | fields + -----|------|-----------|------- + sqlserver_database_io|{"database_name":"azure-sql-db2","file_type":"DATA","host":"adx-vm","logical_filename":"tempdev","measurement_db_type":"AzureSQLDB","physical_filename":"tempdb.mdf","replica_updateability":"READ_WRITE","sql_instance":"adx-sql-server"}|2021-09-09T13:51:20Z|{"current_size_mb":16,"database_id":2,"file_id":1,"read_bytes":2965504,"read_latency_ms":68,"reads":47,"rg_read_stall_ms":42,"rg_write_stall_ms":0,"space_used_mb":0,"write_bytes":1220608,"write_latency_ms":103,"writes":149} + sqlserver_waitstats|{"database_name":"azure-sql-db2","host":"adx-vm","measurement_db_type":"AzureSQLDB","replica_updateability":"READ_WRITE","sql_instance":"adx-sql-server","wait_category":"Worker Thread","wait_type":"THREADPOOL"}|2021-09-09T13:51:20Z|{"max_wait_time_ms":15,"resource_wait_ms":4469,"signal_wait_time_ms":0,"wait_time_ms":4469,"waiting_tasks_count":1464} + + + Since collected metrics object is of complex type so "fields" and "tags" are stored as dynamic data type, multiple ways to query this data- + + - **Query JSON attributes directly**: Azure Data Explorer provides an ability to query JSON data in raw format without parsing it, so JSON attributes can be queried directly in following way - + ``` + Tablename + | where name == "sqlserver_azure_db_resource_stats" and todouble(fields.avg_cpu_percent) > 7 + ``` + ``` + Tablename + | distinct tostring(tags.database_name) + ``` + **Note** - This approach could have performance impact in case of large volumes of data, use belwo mentioned approach for such cases. + + - **Use [Update policy](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/management/updatepolicy)**: Transform dynamic data type columns using update policy. This is the recommended performant way for querying over large volumes of data compared to querying directly over JSON attributes. 
+ ``` // Function to transform data .create-or-alter function Transform_TargetTableName() { - SourceTableName - | extend clerk_type = tags.clerk_type - | extend host = tags.host + SourceTableName + | mv-apply fields on (extend key = tostring(bag_keys(fields)[0])) + | project fieldname=key, value=todouble(fields[key]), name, tags, timestamp } - // Create the destination table (if it doesn't exist already) + // Create destination table with above query's results schema (if it doesn't exist already) .set-or-append TargetTableName <| Transform_TargetTableName() | limit 0 // Apply update policy on destination table .alter table TargetTableName policy update - @'[{"IsEnabled": true, "Source": "SourceTableName", "Query": "Transform_TargetTableName()", "IsTransactional": false, "PropagateIngestionProperties": false}]' - + @'[{"IsEnabled": true, "Source": "SourceTableName", "Query": "Transform_TargetTableName()", "IsTransactional": true, "PropagateIngestionProperties": false}]' ``` - There are two ways to flatten dynamic columns as explained below. You can use either of these ways in above mentioned update policy function - 'Transform_TargetTableName()' - - Use [bag_unpack plugin](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/bag-unpackplugin) to unpack the dynamic columns as shown below. This method will unpack all columns, it could lead to issues in case source schema changes. - ``` - Tablename - | evaluate bag_unpack(tags) - | evaluate bag_unpack(fields) - ``` - - - Use [extend](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/extendoperator) operator as shown below. This is the best way provided you know what columns are needed in the final destination table. Another benefit of this method is even if schema changes, it will not break your queries or dashboards. - ``` - Tablename - | extend clerk_type = tags.clerk_type - | extend host = tags.host - ``` + +2. 
**Data collected using syslog input plugin** + + Sample syslog data - + + name | tags | timestamp | fields + -----|------|-----------|------- + syslog|{"appname":"azsecmond","facility":"user","host":"adx-linux-vm","hostname":"adx-linux-vm","severity":"info"}|2021-09-20T14:36:44Z|{"facility_code":1,"message":" 2021/09/20 14:36:44.890110 Failed to connect to mdsd: dial unix /var/run/mdsd/default_djson.socket: connect: no such file or directory","procid":"2184","severity_code":6,"timestamp":"1632148604890477000","version":1} + syslog|{"appname":"CRON","facility":"authpriv","host":"adx-linux-vm","hostname":"adx-linux-vm","severity":"info"}|2021-09-20T14:37:01Z|{"facility_code":10,"message":" pam_unix(cron:session): session opened for user root by (uid=0)","procid":"26446","severity_code":6,"timestamp":"1632148621120781000","version":1} + + There are multiple ways to flatten dynamic columns using 'extend' or 'bag_unpack' operator. You can use either of these ways in above mentioned update policy function - 'Transform_TargetTableName()' + + - Use [extend](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/extendoperator) operator - This is the recommended approach compared to 'bag_unpack' as it is faster and robust. Even if schema changes, it will not break queries or dashboards. + ``` + Tablenmae + | extend facility_code=toint(fields.facility_code), message=tostring(fields.message), procid= tolong(fields.procid), severity_code=toint(fields.severity_code), + SysLogTimestamp=unixtime_nanoseconds_todatetime(tolong(fields.timestamp)), version= todouble(fields.version), + appname= tostring(tags.appname), facility= tostring(tags.facility),host= tostring(tags.host), hostname=tostring(tags.hostname), severity=tostring(tags.severity) + | project-away fields, tags + ``` + - Use [bag_unpack plugin](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/bag-unpackplugin) to unpack the dynamic type columns automatically. 
This method could lead to issues if source schema changes as its dynamically expanding columns. + ``` + Tablename + | evaluate bag_unpack(tags, columnsConflict='replace_source') + | evaluate bag_unpack(fields, columnsConflict='replace_source') + ``` From d5c52bbfadb4b6103c92e84a816ecea1ea930a9b Mon Sep 17 00:00:00 2001 From: Mya Date: Tue, 5 Oct 2021 18:20:47 -0600 Subject: [PATCH 108/176] chore: update consul to v1.11.0 (#9863) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 0999e764200ca..9680b8c71e258 100644 --- a/go.mod +++ b/go.mod @@ -131,7 +131,7 @@ require ( github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 - github.com/hashicorp/consul/api v1.9.1 + github.com/hashicorp/consul/api v1.11.0 github.com/hashicorp/go-cleanhttp v0.5.1 // indirect github.com/hashicorp/go-hclog v0.12.2 // indirect github.com/hashicorp/go-immutable-radix v1.2.0 // indirect diff --git a/go.sum b/go.sum index 6b60e06efb308..01652479bf4b9 100644 --- a/go.sum +++ b/go.sum @@ -873,8 +873,8 @@ github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 h1:U0KvG github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0/go.mod h1:dk23l2BruuUzRP8wbybQbPn3J7sZga2QHICCeaEy5rQ= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/api v1.6.0/go.mod h1:1NSuaUUkFaJzMasbfq/11wKYWSR67Xn6r2DXKhuDNFg= -github.com/hashicorp/consul/api v1.9.1 h1:SngrdG2L62qqLsUz85qcPhFZ78rPf8tcD5qjMgs6MME= -github.com/hashicorp/consul/api v1.9.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= +github.com/hashicorp/consul/api v1.11.0 h1:Hw/G8TtRvOElqxVIhBzXciiSTbapq8hZ2XKZsXk5ZCE= +github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= 
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.6.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/consul/sdk v0.8.0 h1:OJtKBtEjboEZvG6AOUdh4Z1Zbyu0WcxQ0qatRrZHTVU= From 80188e35696c57064cb6c0db98144d9ec2038aac Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 6 Oct 2021 08:11:58 -0600 Subject: [PATCH 109/176] chore: update containerd to v1.5.7 (#9864) --- go.mod | 20 +++++------ go.sum | 108 +++++++++++++++++++++++++++++++++++++++++++++++---------- 2 files changed, 100 insertions(+), 28 deletions(-) diff --git a/go.mod b/go.mod index 9680b8c71e258..55de18a59efa0 100644 --- a/go.mod +++ b/go.mod @@ -30,8 +30,8 @@ require ( github.com/Azure/go-autorest/tracing v0.6.0 // indirect github.com/BurntSushi/toml v0.3.1 github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee - github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3 // indirect - github.com/Microsoft/hcsshim v0.8.16 // indirect + github.com/Microsoft/go-winio v0.4.17 // indirect + github.com/Microsoft/hcsshim v0.8.21 // indirect github.com/Shopify/sarama v1.27.2 github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46 // indirect github.com/aerospike/aerospike-client-go v1.27.0 @@ -70,8 +70,8 @@ require ( github.com/cenkalti/backoff v2.2.1+incompatible // indirect github.com/cespare/xxhash/v2 v2.1.1 // indirect github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 - github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68 // indirect - github.com/containerd/containerd v1.5.0-beta.4 // indirect + github.com/containerd/cgroups v1.0.1 // indirect + github.com/containerd/containerd v1.5.7 // indirect github.com/coreos/go-semver v0.3.0 // indirect github.com/couchbase/go-couchbase v0.1.0 github.com/couchbase/gomemcached v0.1.3 // indirect @@ -203,7 +203,7 @@ require ( github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029 
github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.1 // indirect - github.com/opencontainers/runc v1.0.0-rc95 // indirect + github.com/opencontainers/runc v1.0.2 // indirect github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect github.com/opentracing/opentracing-go v1.2.0 github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5 @@ -238,7 +238,7 @@ require ( github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083 // indirect github.com/signalfx/golib/v3 v3.3.34 github.com/signalfx/sapm-proto v0.4.0 // indirect - github.com/sirupsen/logrus v1.7.0 + github.com/sirupsen/logrus v1.8.1 github.com/sleepinggenius2/gosmi v0.4.3 github.com/snowflakedb/gosnowflake v1.5.0 github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 @@ -253,8 +253,8 @@ require ( github.com/tklauser/go-sysconf v0.3.5 // indirect github.com/tklauser/numcpus v0.2.2 // indirect github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330 - github.com/vishvananda/netlink v1.1.0 // indirect - github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df // indirect + github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 // indirect + github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae // indirect github.com/vjeantet/grok v1.0.1 github.com/vmware/govmomi v0.26.0 github.com/wavefronthq/wavefront-sdk-go v0.9.7 @@ -314,9 +314,9 @@ require ( gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect gotest.tools v2.2.0+incompatible - k8s.io/api v0.20.4 + k8s.io/api v0.20.6 k8s.io/apimachinery v0.21.1 - k8s.io/client-go v0.20.4 + k8s.io/client-go v0.20.6 k8s.io/klog/v2 v2.8.0 // indirect k8s.io/utils v0.0.0-20201110183641-67b214c5f920 // indirect modernc.org/cc/v3 v3.33.5 // indirect diff --git a/go.sum b/go.sum index 01652479bf4b9..fccbfb9b9bdcb 100644 --- a/go.sum +++ b/go.sum @@ -158,10 +158,11 @@ github.com/Microsoft/go-winio 
v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3 h1:mw6pDQqv38/WGF1cO/jF5t/jyAJ2yi7CmtFLLO5tGFI= -github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3 h1:mw6pDQqv38/WGF1cO/jF5t/jyAJ2yi7CmtFLLO5tGFI= github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w= +github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= @@ -174,8 +175,9 @@ github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2 github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= -github.com/Microsoft/hcsshim v0.8.16 h1:8/auA4LFIZFTGrqfKhGBSXwM6/4X1fHa/xniyEHu8ac= github.com/Microsoft/hcsshim v0.8.16/go.mod 
h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= +github.com/Microsoft/hcsshim v0.8.21 h1:btRfUDThBE5IKcvI8O8jOiIkujUsAMBSRsYDYmEi6oM= +github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= @@ -244,6 +246,7 @@ github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3/go.mod h1:KASm github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 h1:FD4/ikKOFxwP8muWDypbmBWc634+YcAs3eBrYAmRdZY= github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.3.3 h1:a9F4rlj7EWWrbj7BYw8J8+x+ZZkJeqzNyRk8hdPF+ro= github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= @@ -302,6 +305,7 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2ioR0= github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/blang/semver v3.1.0+incompatible/go.mod 
h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/bmatcuk/doublestar/v3 v3.0.0 h1:TQtVPlDnAYwcrVNB2JiGuMc++H5qzWZd9PhkNo5WyHI= @@ -348,7 +352,8 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= -github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 h1:57RI0wFkG/smvVTcz7F43+R0k+Hvci3jAVQF9lyMoOo= @@ -366,17 +371,20 @@ github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod 
h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= -github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68 h1:hkGVFjz+plgr5UfxZUTPFbUFIF/Km6/s+RVRIRHLrrY= github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/cgroups v1.0.1 h1:iJnMvco9XGvKUvNQkv88bE4uJXxRQH18efbKo9w5vHQ= +github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= @@ -394,8 +402,11 @@ github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMX github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= github.com/containerd/containerd 
v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= -github.com/containerd/containerd v1.5.0-beta.4 h1:zjz4MOAOFgdBlwid2nNUlJ3YLpVi/97L36lfMYJex60= github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= +github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= +github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= +github.com/containerd/containerd v1.5.7 h1:rQyoYtj4KddB3bxG6SAqd4+08gePNyJjRqvOIfV3rkM= +github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= @@ -403,25 +414,31 @@ github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= -github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e h1:6JKvHHt396/qabvMhnhUZvWaHZzfVfldxE60TK8YLhg= github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= +github.com/containerd/continuity v0.1.0 h1:UFRRY5JemiAhPZrr/uE0n8fMTLcZsUvySPr1+D7pgr8= +github.com/containerd/continuity v0.1.0/go.mod 
h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= +github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= +github.com/containerd/imgcrypt v1.1.1/go.mod 
h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= @@ -430,15 +447,24 @@ github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8h github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= +github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= github.com/containernetworking/cni v0.7.1/go.mod 
h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= +github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= +github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= @@ -450,7 +476,7 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7 github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/go-systemd/v22 v22.3.1/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg 
v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/couchbase/go-couchbase v0.1.0 h1:g4bCvDwRL+ZL6HLhYeRlXxEYP31Wpy0VFxnFw6efEp8= @@ -486,6 +512,7 @@ github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11 github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/digitalocean/godo v1.42.1/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= @@ -741,6 +768,7 @@ github.com/golang/geo v0.0.0-20190916061304-5b978397cfec h1:lJwO/92dFXWeXOZdoGXg github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e 
h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= @@ -852,6 +880,7 @@ github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gosnmp/gosnmp v1.32.0 h1:gctewmZx5qFI0oHMzRnjETqIZ093d9NgZy9TQr3V0iA= @@ -861,8 +890,10 @@ github.com/grid-x/modbus v0.0.0-20210224155242-c4a3d042e99b h1:Y4xqzO0CDNoehCr3n github.com/grid-x/modbus v0.0.0-20210224155242-c4a3d042e99b/go.mod h1:YaK0rKJenZ74vZFcSSLlAQqtG74PMI68eDjpDCDDmTw= github.com/grid-x/serial v0.0.0-20191104121038-e24bc9bf6f08 h1:syBxnRYnSPUDdkdo5U4sy2roxBPQDjNiw4od7xlsABQ= github.com/grid-x/serial v0.0.0-20191104121038-e24bc9bf6f08/go.mod h1:kdOd86/VGFWRrtkNwf1MPk0u1gIjc4Y7R2j7nhwc7Rk= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= @@ -916,6 +947,7 @@ 
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= @@ -927,7 +959,6 @@ github.com/hashicorp/serf v0.9.3/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKEN github.com/hashicorp/serf v0.9.5 h1:EBWvyu9tcRszt3Bxp3KNssBMP1KuHWyO51lz9+786iM= github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/hetznercloud/hcloud-go v1.21.1/go.mod h1:xng8lbDUg+xM1dgc0yGHX5EeqbwIq7UYlMWMTx3SQVg= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -936,6 +967,7 @@ github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod 
h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/apcupsd v0.0.0-20210427145308-694d5caead0e h1:3J1OB4RDKwXs5l8uEV6BP/tucOJOPDQysiT7/9cuXzA= github.com/influxdata/apcupsd v0.0.0-20210427145308-694d5caead0e/go.mod h1:WYK/Z/aXq9cbMFIL5ihcA4sX/r/3/WCas/Qvs/2fXcA= @@ -1070,6 +1102,7 @@ github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.13.1 h1:wXr2uRxZTJXHLly6qhJabee5JqIhTRoLBhDOA74hDEQ= github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= @@ -1103,6 +1136,7 @@ github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -1180,6 +1214,7 @@ 
github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/moby/ipvs v1.0.1 h1:aoZ7fhLTXgDbzVrAnvV+XbKOU8kOET7B3+xULDF/1o0= github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= +github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM= github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM= @@ -1243,6 +1278,8 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLA github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso= github.com/nsqio/go-nsq v1.0.8 h1:3L2F8tNLlwXXlp2slDUrUWSBn2O3nMh8R1/KEDFTHPk= github.com/nsqio/go-nsq v1.0.8/go.mod h1:vKq36oyeVXgsS5Q8YEO7WghqidAVXQlcFxzQbQTuDEY= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= @@ -1256,15 +1293,17 @@ github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= github.com/onsi/ginkgo v1.11.0/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA= +github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029 h1:lXQqyLroROhwR2Yq/kXbLzVecgmVeZh2TFLg6OxCd+w= github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc= @@ -1286,8 +1325,8 @@ github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rm github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= -github.com/opencontainers/runc v1.0.0-rc95 h1:RMuWVfY3E1ILlVsC3RhIq38n4sJtlOFwU9gfFZSqrd0= -github.com/opencontainers/runc v1.0.0-rc95/go.mod h1:z+bZxa/+Tz/FmYVWkhUajJdzFeOqjc5vrqskhVyHGUM= +github.com/opencontainers/runc v1.0.2 
h1:opHZMaswlyxz1OuGpBE53Dwe4/xF7EZTY0A2L/FpCOg= +github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -1305,6 +1344,7 @@ github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqi github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 h1:lM6RxxfUMrYL/f8bWEUqdXrANWtrL7Nndbm9iFN0DlU= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= @@ -1328,8 +1368,10 @@ github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144T github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= github.com/pavius/impi v0.0.0-20180302134524-c1cbdcb8df2b/go.mod h1:x/hU0bfdWIhuOT1SKwiJg++yvkk6EuOtJk8WtDZqgr8= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= github.com/pelletier/go-toml v1.7.0/go.mod 
h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= @@ -1367,6 +1409,7 @@ github.com/prometheus/alertmanager v0.21.0/go.mod h1:h7tJ81NA0VLWvWEayi1QltevFkL github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= @@ -1384,7 +1427,9 @@ github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.0/go.mod 
h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= @@ -1396,6 +1441,7 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9 github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= @@ -1409,6 +1455,7 @@ github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3x github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/prometheus v1.8.2-0.20200911110723-e83ef207b6c2 h1:IB/5RJRcJiR/YzKs4Aou86s/RaMepZOZVCArYNHJHWc= github.com/prometheus/prometheus v1.8.2-0.20200911110723-e83ef207b6c2/go.mod h1:Td6hjwdXDmVt5CI9T03Sw+yBNxLBq/Yx3ZtmtP8zlCA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= github.com/rcrowley/go-metrics 
v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -1480,8 +1527,9 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sleepinggenius2/gosmi v0.4.3 h1:99Zwzy1Cvgsh396sw07oR2G4ab88ILGZFMxSlGWnR6o= github.com/sleepinggenius2/gosmi v0.4.3/go.mod h1:l8OniPmd3bJzw0MXP2/qh7AhP/e+bTY2CNivIhsnDT0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= @@ -1495,10 +1543,13 @@ github.com/snowflakedb/gosnowflake v1.5.0/go.mod h1:1kyg2XEduwti88V11PKRHImhXLK5 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.0.0/go.mod 
h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -1506,6 +1557,7 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 h1:WhxRHzgeVGETMlmVfqhRn8RIeeNoPr2Czh33I4Zdccw= @@ -1554,6 +1606,7 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1 github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod 
h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= @@ -1563,11 +1616,13 @@ github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330/go.mod h1:7+aWBs github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec/go.mod h1:owBmyHYMLkxyrugmfwE/DLJyW8Ro9mkphwuVErQ0iUw= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= -github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 h1:cPXZWzzG0NllBLdjWoD1nDfaqu98YMv+OneaKc8sPOA= +github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= -github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7ZovXvuNyL3XQl8UFofeikI1NW1Gypu7k= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae h1:4hwBBUfQCFe3Cym0ZtKyq7L16eZUtYKs+BaHDN6mAns= +github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vjeantet/grok v1.0.1 h1:2rhIR7J4gThTgcZ1m2JY4TrJZNgjn985U28kT2wQrJ4= github.com/vjeantet/grok v1.0.1/go.mod h1:ax1aAchzC6/QMXMcyzHQGZWaW1l195+uMYIkCWPCNIo= github.com/vmware/govmomi v0.26.0 h1:JMZR5c7MHH3nCEAVYS3WyRIA35W3+b3tLwAqxVzq1Rw= @@ -1599,6 +1654,7 @@ github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint 
v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a h1:fZHgsYlfvtyqToslyjUt3VOPF4J7aK/3MPcK7xp3PDk= github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a/go.mod h1:ul22v+Nro/R083muKhosV54bj5niojjWZvU8xrevuH4= @@ -1613,6 +1669,7 @@ github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= @@ -1807,6 +1864,7 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -1890,6 +1948,7 @@ golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1918,6 +1977,7 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1943,6 +2003,7 @@ 
golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2343,21 +2404,30 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.18.8/go.mod h1:d/CXqwWv+Z2XEG1LgceeDmHQwpUJhROPx16SlxJgERY= k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= -k8s.io/api v0.20.4 h1:xZjKidCirayzX6tHONRQyTNDVIR55TYVqgATqo6ZULY= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= +k8s.io/api v0.20.6 h1:bgdZrW++LqgrLikWYNruIKAtltXbSCX2l5mJu11hrVE= +k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= k8s.io/apimachinery v0.18.8/go.mod h1:6sQd+iHEqmOtALqOFjSWp2KZ9F0wlU/nWm0ZgsYWMig= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= k8s.io/apimachinery v0.21.1 h1:Q6XuHGlj2xc+hlMCvqyYfbv3H7SRGn2c8NycxJquDVs= k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= k8s.io/apiserver v0.20.1/go.mod 
h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= +k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= +k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= k8s.io/client-go v0.18.8/go.mod h1:HqFqMllQ5NnQJNwjro9k5zMyfhZlOwpuTLVrxjkYSxU= k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= -k8s.io/client-go v0.20.4 h1:85crgh1IotNkLpKYKZHVNI1JT86nr/iDCvq2iWKsql4= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= +k8s.io/client-go v0.20.6 h1:nJZOfolnsVtDtbGJNCxzOtKUAu7zvXjB8+pMo9UNxZo= +k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= +k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= +k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= @@ -2410,9 +2480,11 @@ rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod 
h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8= sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= From 83dae504ade945e982cae37b95ea983048b3a951 Mon Sep 17 00:00:00 2001 From: "telegraf-tiger[bot]" <76974415+telegraf-tiger[bot]@users.noreply.github.com> Date: Wed, 6 Oct 2021 11:48:58 -0500 Subject: [PATCH 110/176] fix: update etc/telegraf.conf and etc/telegraf_windows.conf (#9814) Co-authored-by: Tiger Bot <> --- etc/telegraf.conf | 284 +- etc/telegraf_windows.conf | 8843 ++++++++++++++++++++++++++++++++++++- 2 files changed, 8723 insertions(+), 404 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 12672da45cf7d..5564bc23ac8aa 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -765,6 +765,9 @@ # ## Endpoints for your graylog instances. # servers = ["udp://127.0.0.1:12201"] # +# ## Connection timeout. +# # timeout = "5s" +# # ## The field to use as the GELF short_message, if unset the static string # ## "telegraf" will be used. # ## example: short_message_field = "message" @@ -1254,6 +1257,12 @@ # ## actually reads it # # retain = false # +# ## Defines the maximum length of time that the broker and client may not communicate. +# ## Defaults to 0 which turns the feature off. 
For version v2.0.12 of eclipse/mosquitto there is a +# ## [bug](https://github.com/eclipse/mosquitto/issues/2117) which requires keep_alive to be set. +# ## As a reference eclipse/paho.mqtt.golang v1.3.0 defaults to 30. +# # keep_alive = 0 +# # ## Data format to output. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -3415,6 +3424,14 @@ # # ## Filter bucket fields to include only here. # # bucket_stats_included = ["quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification (defaults to false) +# ## If set to false, tls_cert and tls_key are required +# # insecure_skip_verify = false # # Read CouchDB Stats from one or more servers @@ -3769,6 +3786,13 @@ # ## The date/time field in the Elasticsearch index (mandatory). # date_field = "@timestamp" # +# ## If the field used for the date/time field in Elasticsearch is also using +# ## a custom date/time format it may be required to provide the format to +# ## correctly parse the field. +# ## +# ## If using one of the built in elasticsearch formats this is not required. +# # date_field_custom_format = "" +# # ## Time window to query (eg. "1m" to query documents from last minute). # ## Normally should be set to same as collection interval # query_period = "1m" @@ -4777,6 +4801,12 @@ # # ] +# # Read metrics about LVM physical volumes, volume groups, logical volumes. +# [[inputs.lvm]] +# ## Use sudo to run LVM commands +# use_sudo = false + + # # Gathers metrics from the /3.0/reports MailChimp API # [[inputs.mailchimp]] # ## MailChimp API key @@ -5471,6 +5501,12 @@ # ## Password. Required for auth_method = "UserName" # # password = "" # # +# ## Option to select the metric timestamp to use. 
Valid options are: +# ## "gather" -- uses the time of receiving the data in telegraf +# ## "server" -- uses the timestamp provided by the server +# ## "source" -- uses the timestamp provided by the source +# # timestamp = "gather" +# # # ## Node ID configuration # ## name - field name to use in the output # ## namespace - OPC UA namespace of the node (integer value 0 thru 3) @@ -5546,7 +5582,7 @@ # # timeout = "5ms" -# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver +# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver # [[inputs.opensmtpd]] # ## If running as a restricted user you can prepend sudo for additional access: # #use_sudo = false @@ -6486,219 +6522,6 @@ # ## General connection timeout # # timeout = "5s" -# # Input plugin to collect Windows Event Log messages -# [[inputs.win_eventlog]] -# ## Telegraf should have Administrator permissions to subscribe for some Windows Events channels -# ## (System log, for example) -# -# ## LCID (Locale ID) for event rendering -# ## 1033 to force English language -# ## 0 to use default Windows locale -# # locale = 0 -# -# ## Name of eventlog, used only if xpath_query is empty -# ## Example: "Application" -# # eventlog_name = "" -# -# ## xpath_query can be in defined short form like "Event/System[EventID=999]" -# ## or you can form a XML Query. 
Refer to the Consuming Events article: -# ## https://docs.microsoft.com/en-us/windows/win32/wes/consuming-events -# ## XML query is the recommended form, because it is most flexible -# ## You can create or debug XML Query by creating Custom View in Windows Event Viewer -# ## and then copying resulting XML here -# xpath_query = ''' -# -# -# -# *[System[( (EventID >= 5152 and EventID <= 5158) or EventID=5379 or EventID=4672)]] -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# ''' -# -# ## System field names: -# ## "Source", "EventID", "Version", "Level", "Task", "Opcode", "Keywords", "TimeCreated", -# ## "EventRecordID", "ActivityID", "RelatedActivityID", "ProcessID", "ThreadID", "ProcessName", -# ## "Channel", "Computer", "UserID", "UserName", "Message", "LevelText", "TaskText", "OpcodeText" -# -# ## In addition to System, Data fields can be unrolled from additional XML nodes in event. -# ## Human-readable representation of those nodes is formatted into event Message field, -# ## but XML is more machine-parsable -# -# # Process UserData XML to fields, if this node exists in Event XML -# process_userdata = true -# -# # Process EventData XML to fields, if this node exists in Event XML -# process_eventdata = true -# -# ## Separator character to use for unrolled XML Data field names -# separator = "_" -# -# ## Get only first line of Message field. For most events first line is usually more than enough -# only_first_line_of_message = true -# -# ## Parse timestamp from TimeCreated.SystemTime event field. -# ## Will default to current time of telegraf processing on parsing error or if set to false -# timestamp_from_event = true -# -# ## Fields to include as tags. Globbing supported ("Level*" for both "Level" and "LevelText") -# event_tags = ["Source", "EventID", "Level", "LevelText", "Task", "TaskText", "Opcode", "OpcodeText", "Keywords", "Channel", "Computer"] -# -# ## Default list of fields to send. All fields are sent by default. 
Globbing supported -# event_fields = ["*"] -# -# ## Fields to exclude. Also applied to data fields. Globbing supported -# exclude_fields = ["TimeCreated", "Binary", "Data_Address*"] -# -# ## Skip those tags or fields if their value is empty or equals to zero. Globbing supported -# exclude_empty = ["*ActivityID", "UserID"] - - -# # Input plugin to counterPath Performance Counters on Windows operating systems -# [[inputs.win_perf_counters]] -# ## By default this plugin returns basic CPU and Disk statistics. -# ## See the README file for more examples. -# ## Uncomment examples below or write your own as you see fit. If the system -# ## being polled for data does not have the Object at startup of the Telegraf -# ## agent, it will not be gathered. -# ## Settings: -# # PrintValid = false # Print All matching performance counters -# # Whether request a timestamp along with the PerfCounter data or just use current time -# # UsePerfCounterTime=true -# # If UseWildcardsExpansion params is set to true, wildcards (partial wildcards in instance names and wildcards in counters names) in configured counter paths will be expanded -# # and in case of localized Windows, counter paths will be also localized. It also returns instance indexes in instance names. -# # If false, wildcards (not partial) in instance names will still be expanded, but instance indexes will not be returned in instance names. -# #UseWildcardsExpansion = false -# # Period after which counters will be reread from configuration and wildcards in counter paths expanded -# CountersRefreshInterval="1m" -# -# [[inputs.win_perf_counters.object]] -# # Processor usage, alternative to native, reports on a per core. -# ObjectName = "Processor" -# Instances = ["*"] -# Counters = [ -# "% Idle Time", -# "% Interrupt Time", -# "% Privileged Time", -# "% User Time", -# "% Processor Time", -# "% DPC Time", -# ] -# Measurement = "win_cpu" -# # Set to true to include _Total instance when querying for all (*). 
-# # IncludeTotal=false -# # Print out when the performance counter is missing from object, counter or instance. -# # WarnOnMissing = false -# -# [[inputs.win_perf_counters.object]] -# # Disk times and queues -# ObjectName = "LogicalDisk" -# Instances = ["*"] -# Counters = [ -# "% Idle Time", -# "% Disk Time", -# "% Disk Read Time", -# "% Disk Write Time", -# "% User Time", -# "% Free Space", -# "Current Disk Queue Length", -# "Free Megabytes", -# ] -# Measurement = "win_disk" -# -# [[inputs.win_perf_counters.object]] -# ObjectName = "PhysicalDisk" -# Instances = ["*"] -# Counters = [ -# "Disk Read Bytes/sec", -# "Disk Write Bytes/sec", -# "Current Disk Queue Length", -# "Disk Reads/sec", -# "Disk Writes/sec", -# "% Disk Time", -# "% Disk Read Time", -# "% Disk Write Time", -# ] -# Measurement = "win_diskio" -# -# [[inputs.win_perf_counters.object]] -# ObjectName = "Network Interface" -# Instances = ["*"] -# Counters = [ -# "Bytes Received/sec", -# "Bytes Sent/sec", -# "Packets Received/sec", -# "Packets Sent/sec", -# "Packets Received Discarded", -# "Packets Outbound Discarded", -# "Packets Received Errors", -# "Packets Outbound Errors", -# ] -# Measurement = "win_net" -# -# -# [[inputs.win_perf_counters.object]] -# ObjectName = "System" -# Counters = [ -# "Context Switches/sec", -# "System Calls/sec", -# "Processor Queue Length", -# "System Up Time", -# ] -# Instances = ["------"] -# Measurement = "win_system" -# -# [[inputs.win_perf_counters.object]] -# # Example counterPath where the Instance portion must be removed to get data back, -# # such as from the Memory object. 
-# ObjectName = "Memory" -# Counters = [ -# "Available Bytes", -# "Cache Faults/sec", -# "Demand Zero Faults/sec", -# "Page Faults/sec", -# "Pages/sec", -# "Transition Faults/sec", -# "Pool Nonpaged Bytes", -# "Pool Paged Bytes", -# "Standby Cache Reserve Bytes", -# "Standby Cache Normal Priority Bytes", -# "Standby Cache Core Bytes", -# ] -# Instances = ["------"] # Use 6 x - to remove the Instance bit from the counterPath. -# Measurement = "win_mem" -# -# [[inputs.win_perf_counters.object]] -# # Example query where the Instance portion must be removed to get data back, -# # such as from the Paging File object. -# ObjectName = "Paging File" -# Counters = [ -# "% Usage", -# ] -# Instances = ["_Total"] -# Measurement = "win_swap" - - -# # Input plugin to report Windows services info. -# [[inputs.win_services]] -# ## Names of the services to monitor. Leave empty to monitor all the available services on the host. Globs accepted. -# service_names = [ -# "LanmanServer", -# "TermService", -# "Win*", -# ] - # # A plugin to collect stats from Varnish HTTP Cache # [[inputs.varnish]] @@ -6802,7 +6625,7 @@ # # Listener capable of handling KNX bus messages provided through a KNX-IP Interface. -# [[inputs.knx_listener]] +# [[inputs.KNXListener]] # ## Type of KNX-IP interface. # ## Can be either "tunnel" or "router". # # service_type = "tunnel" @@ -7667,7 +7490,7 @@ # ## This value is propagated to pqos tool. Interval format is defined by pqos itself. # ## If not provided or provided 0, will be set to 10 = 10x100ms = 1s. # # sampling_interval = "10" -# +# # ## Optionally specify the path to pqos executable. # ## If not provided, auto discovery will be performed. # # pqos_path = "/usr/local/bin/pqos" @@ -7675,12 +7498,12 @@ # ## Optionally specify if IPC and LLC_Misses metrics shouldn't be propagated. # ## If not provided, default value is false. # # shortened_metrics = false -# +# # ## Specify the list of groups of CPU core(s) to be provided as pqos input. 
# ## Mandatory if processes aren't set and forbidden if processes are specified. # ## e.g. ["0-3", "4,5,6"] or ["1-3,4"] # # cores = ["0-3"] -# +# # ## Specify the list of processes for which Metrics will be collected. # ## Mandatory if cores aren't set and forbidden if cores are specified. # ## e.g. ["qemu", "pmd"] @@ -7924,6 +7747,30 @@ # table_name = "default" +# # Listener capable of handling KNX bus messages provided through a KNX-IP Interface. +# [[inputs.knx_listener]] +# ## Type of KNX-IP interface. +# ## Can be either "tunnel" or "router". +# # service_type = "tunnel" +# +# ## Address of the KNX-IP interface. +# service_address = "localhost:3671" +# +# ## Measurement definition(s) +# # [[inputs.knx_listener.measurement]] +# # ## Name of the measurement +# # name = "temperature" +# # ## Datapoint-Type (DPT) of the KNX messages +# # dpt = "9.001" +# # ## List of Group-Addresses (GAs) assigned to the measurement +# # addresses = ["5/5/1"] +# +# # [[inputs.knx_listener.measurement]] +# # name = "illumination" +# # dpt = "9.004" +# # addresses = ["5/5/3"] + + # # Read metrics off Arista LANZ, via socket # [[inputs.lanz]] # ## URL to Arista LANZ endpoint @@ -9103,3 +8950,4 @@ # [[inputs.zipkin]] # # path = "/api/v1/spans" # URL path for span data # # port = 9411 # Port on which Telegraf listens + diff --git a/etc/telegraf_windows.conf b/etc/telegraf_windows.conf index 5b7ca95057444..ee67219c3c3f5 100644 --- a/etc/telegraf_windows.conf +++ b/etc/telegraf_windows.conf @@ -90,8 +90,8 @@ ## If set to -1, no archives are removed. # logfile_rotation_max_archives = 5 - ## Pick a timezone to use when logging or type 'local' for local time. Example: 'America/Chicago'. - ## See https://socketloop.com/tutorials/golang-display-list-of-timezones-with-gmt for timezone formatting options. + ## Pick a timezone to use when logging or type 'local' for local time. 
+ ## Example: America/Chicago # log_with_timezone = "" ## Override default hostname, if empty use os.Hostname() @@ -99,7 +99,6 @@ ## If set to true, do no set the "host" tag in the telegraf agent. omit_hostname = false - ############################################################################### # OUTPUT PLUGINS # ############################################################################### @@ -175,7 +174,7 @@ ## HTTP Content-Encoding for write request body, can be set to "gzip" to ## compress body or "identity" to apply no encoding. - # content_encoding = "identity" + # content_encoding = "gzip" ## When true, Telegraf will output unsigned integers as unsigned values, ## i.e.: "42u". You will need a version of InfluxDB supporting unsigned @@ -183,6 +182,696 @@ ## existing data has been written. # influx_uint_support = false + +# # Configuration for Amon Server to send metrics to. +# [[outputs.amon]] +# ## Amon Server Key +# server_key = "my-server-key" # required. +# +# ## Amon Instance URL +# amon_instance = "https://youramoninstance" # required +# +# ## Connection timeout. +# # timeout = "5s" + + +# # Publishes metrics to an AMQP broker +# [[outputs.amqp]] +# ## Broker to publish to. +# ## deprecated in 1.7; use the brokers option +# # url = "amqp://localhost:5672/influxdb" +# +# ## Brokers to publish to. If multiple brokers are specified a random broker +# ## will be selected anytime a connection is established. This can be +# ## helpful for load balancing when not using a dedicated load balancer. +# brokers = ["amqp://localhost:5672/influxdb"] +# +# ## Maximum messages to send over a connection. Once this is reached, the +# ## connection is closed and a new connection is made. This can be helpful for +# ## load balancing when not using a dedicated load balancer. +# # max_messages = 0 +# +# ## Exchange to declare and publish to. +# exchange = "telegraf" +# +# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash". 
+# # exchange_type = "topic" +# +# ## If true, exchange will be passively declared. +# # exchange_passive = false +# +# ## Exchange durability can be either "transient" or "durable". +# # exchange_durability = "durable" +# +# ## Additional exchange arguments. +# # exchange_arguments = { } +# # exchange_arguments = {"hash_property" = "timestamp"} +# +# ## Authentication credentials for the PLAIN auth_method. +# # username = "" +# # password = "" +# +# ## Auth method. PLAIN and EXTERNAL are supported +# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as +# ## described here: https://www.rabbitmq.com/plugins.html +# # auth_method = "PLAIN" +# +# ## Metric tag to use as a routing key. +# ## ie, if this tag exists, its value will be used as the routing key +# # routing_tag = "host" +# +# ## Static routing key. Used when no routing_tag is set or as a fallback +# ## when the tag specified in routing tag is not found. +# # routing_key = "" +# # routing_key = "telegraf" +# +# ## Delivery Mode controls if a published message is persistent. +# ## One of "transient" or "persistent". +# # delivery_mode = "transient" +# +# ## InfluxDB database added as a message header. +# ## deprecated in 1.7; use the headers option +# # database = "telegraf" +# +# ## InfluxDB retention policy added as a message header +# ## deprecated in 1.7; use the headers option +# # retention_policy = "default" +# +# ## Static headers added to each published message. +# # headers = { } +# # headers = {"database" = "telegraf", "retention_policy" = "default"} +# +# ## Connection timeout. If not provided, will default to 5s. 0s means no +# ## timeout (not recommended). 
+# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## If true use batch serialization format instead of line based delimiting. +# ## Only applies to data formats which are not line based such as JSON. +# ## Recommended to set to true. +# # use_batch_format = false +# +# ## Content encoding for message payloads, can be set to "gzip" to or +# ## "identity" to apply no encoding. +# ## +# ## Please note that when use_batch_format = false each amqp message contains only +# ## a single metric, it is recommended to use compression with batch format +# ## for best results. +# # content_encoding = "identity" +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" + + +# # Send metrics to Azure Application Insights +# [[outputs.application_insights]] +# ## Instrumentation key of the Application Insights resource. +# instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx" +# +# ## Regions that require endpoint modification https://docs.microsoft.com/en-us/azure/azure-monitor/app/custom-endpoints +# # endpoint_url = "https://dc.services.visualstudio.com/v2/track" +# +# ## Timeout for closing (default: 5s). +# # timeout = "5s" +# +# ## Enable additional diagnostic logging. +# # enable_diagnostic_logging = false +# +# ## Context Tag Sources add Application Insights context tags to a tag value. 
+# ##
+# ## For list of allowed context tag keys see:
+# ## https://github.com/microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go
+# # [outputs.application_insights.context_tag_sources]
+# # "ai.cloud.role" = "kubernetes_container_name"
+# # "ai.cloud.roleInstance" = "kubernetes_pod_name"
+
+
+# # Sends metrics to Azure Data Explorer
+# [[outputs.azure_data_explorer]]
+# ## Azure Data Explorer cluster endpoint
+# ## ex: endpoint_url = "https://clustername.australiasoutheast.kusto.windows.net"
+# endpoint_url = ""
+#
+# ## The Azure Data Explorer database that the metrics will be ingested into.
+# ## The plugin will NOT generate this database automatically, it's expected that this database already exists before ingestion.
+# ## ex: "exampledatabase"
+# database = ""
+#
+# ## Timeout for Azure Data Explorer operations
+# # timeout = "20s"
+#
+# ## Type of metrics grouping used when pushing to Azure Data Explorer.
+# ## Default is "TablePerMetric" for one table per different metric.
+# ## For more information, please check the plugin README.
+# # metrics_grouping_type = "TablePerMetric"
+#
+# ## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable").
+# # table_name = ""
+#
+
+
+# # Send aggregate metrics to Azure Monitor
+# [[outputs.azure_monitor]]
+# ## Timeout for HTTP writes.
+# # timeout = "20s"
+#
+# ## Set the namespace prefix, defaults to "Telegraf/".
+# # namespace_prefix = "Telegraf/"
+#
+# ## Azure Monitor doesn't have a string value type, so convert string
+# ## fields to dimensions (a.k.a. tags) if enabled. Azure Monitor allows
+# ## a maximum of 10 dimensions so Telegraf will only send the first 10
+# ## alphanumeric dimensions.
+# # strings_as_dimensions = false
+#
+# ## Both region and resource_id must be set or be available via the
+# ## Instance Metadata service on Azure Virtual Machines.
+# #
+# ## Azure Region to publish metrics against.
+# ## ex: region = "southcentralus" +# # region = "" +# # +# ## The Azure Resource ID against which metric will be logged, e.g. +# ## ex: resource_id = "/subscriptions//resourceGroups//providers/Microsoft.Compute/virtualMachines/" +# # resource_id = "" +# +# ## Optionally, if in Azure US Government, China or other sovereign +# ## cloud environment, set appropriate REST endpoint for receiving +# ## metrics. (Note: region may be unused in this context) +# # endpoint_url = "https://monitoring.core.usgovcloudapi.net" + + +# # Publish Telegraf metrics to a Google Cloud PubSub topic +# [[outputs.cloud_pubsub]] +# ## Required. Name of Google Cloud Platform (GCP) Project that owns +# ## the given PubSub topic. +# project = "my-project" +# +# ## Required. Name of PubSub topic to publish metrics to. +# topic = "my-topic" +# +# ## Required. Data format to consume. +# ## Each data format has its own unique set of configuration options. +# ## Read more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Optional. Filepath for GCP credentials JSON file to authorize calls to +# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use +# ## Application Default Credentials, which is preferred. +# # credentials_file = "path/to/my/creds.json" +# +# ## Optional. If true, will send all metrics per write in one PubSub message. +# # send_batched = true +# +# ## The following publish_* parameters specifically configures batching +# ## requests made to the GCP Cloud PubSub API via the PubSub Golang library. Read +# ## more here: https://godoc.org/cloud.google.com/go/pubsub#PublishSettings +# +# ## Optional. Send a request to PubSub (i.e. actually publish a batch) +# ## when it has this many PubSub messages. If send_batched is true, +# ## this is ignored and treated as if it were 1. +# # publish_count_threshold = 1000 +# +# ## Optional. Send a request to PubSub (i.e. 
actually publish a batch) +# ## when it has this many PubSub messages. If send_batched is true, +# ## this is ignored and treated as if it were 1 +# # publish_byte_threshold = 1000000 +# +# ## Optional. Specifically configures requests made to the PubSub API. +# # publish_num_go_routines = 2 +# +# ## Optional. Specifies a timeout for requests to the PubSub API. +# # publish_timeout = "30s" +# +# ## Optional. If true, published PubSub message data will be base64-encoded. +# # base64_data = false +# +# ## Optional. PubSub attributes to add to metrics. +# # [outputs.cloud_pubsub.attributes] +# # my_attr = "tag_value" + + +# # Configuration for AWS CloudWatch output. +# [[outputs.cloudwatch]] +# ## Amazon REGION +# region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Namespace for the CloudWatch MetricDatums +# namespace = "InfluxData/Telegraf" +# +# ## If you have a large amount of metrics, you should consider to send statistic +# ## values instead of raw metrics which could not only improve performance but +# ## also save AWS API cost. 
If enable this flag, this plugin would parse the required +# ## CloudWatch statistic fields (count, min, max, and sum) and send them to CloudWatch. +# ## You could use basicstats aggregator to calculate those fields. If not all statistic +# ## fields are available, all fields would still be sent as raw metrics. +# # write_statistics = false +# +# ## Enable high resolution metrics of 1 second (if not enabled, standard resolution are of 60 seconds precision) +# # high_resolution_metrics = false + + +# # Configuration for AWS CloudWatchLogs output. +# [[outputs.cloudwatch_logs]] +# ## The region is the Amazon region that you wish to connect to. +# ## Examples include but are not limited to: +# ## - us-west-1 +# ## - us-west-2 +# ## - us-east-1 +# ## - ap-southeast-1 +# ## - ap-southeast-2 +# ## ... +# region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Cloud watch log group. Must be created in AWS cloudwatch logs upfront! 
+# ## For example, you can specify the name of the k8s cluster here to group logs from all clusters in one place
+# log_group = "my-group-name"
+#
+# ## Log stream in log group
+# ## Either log group name or reference to metric attribute, from which it can be parsed:
+# ## tag: or field:. If the log stream does not exist, it will be created.
+# ## Since AWS does not automatically delete log streams with expired log entries (i.e. empty log stream)
+# ## you need to put in place appropriate house-keeping (https://forums.aws.amazon.com/thread.jspa?threadID=178855)
+# log_stream = "tag:location"
+#
+# ## Source of log data - metric name
+# ## specify the name of the metric, from which the log data should be retrieved.
+# ## I.e., if you are using docker_log plugin to stream logs from container, then
+# ## specify log_data_metric_name = "docker_log"
+# log_data_metric_name = "docker_log"
+#
+# ## Specify from which metric attribute the log data should be retrieved:
+# ## tag: or field:.
+# ## I.e., if you are using docker_log plugin to stream logs from container, then
+# ## specify log_data_source = "field:message"
+# log_data_source = "field:message"
+
+
+# # Configuration for CrateDB to send metrics to.
+# [[outputs.cratedb]]
+# # A github.com/jackc/pgx/v4 connection string.
+# # See https://pkg.go.dev/github.com/jackc/pgx/v4#ParseConfig
+# url = "postgres://user:password@localhost/schema?sslmode=disable"
+# # Timeout for all CrateDB queries.
+# timeout = "5s"
+# # Name of the table to store metrics in.
+# table = "metrics"
+# # If true, and the metrics table does not exist, create it automatically.
+# table_create = true
+# # The character(s) to replace any '.' in an object key with
+# key_separator = "_"
+
+
+# # Configuration for DataDog API to send metrics to.
+# [[outputs.datadog]]
+# ## Datadog API key
+# apikey = "my-secret-key"
+#
+# ## Connection timeout.
+# # timeout = "5s"
+#
+# ## Write URL override; useful for debugging.
+# # url = "https://app.datadoghq.com/api/v1/series" +# +# ## Set http_proxy (telegraf uses the system wide proxy settings if it isn't set) +# # http_proxy_url = "http://localhost:8888" + + +# # Send metrics to nowhere at all +# [[outputs.discard]] +# # no configuration + + +# # Send telegraf metrics to a Dynatrace environment +# [[outputs.dynatrace]] +# ## For usage with the Dynatrace OneAgent you can omit any configuration, +# ## the only requirement is that the OneAgent is running on the same host. +# ## Only setup environment url and token if you want to monitor a Host without the OneAgent present. +# ## +# ## Your Dynatrace environment URL. +# ## For Dynatrace OneAgent you can leave this empty or set it to "http://127.0.0.1:14499/metrics/ingest" (default) +# ## For Dynatrace SaaS environments the URL scheme is "https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest" +# ## For Dynatrace Managed environments the URL scheme is "https://{your-domain}/e/{your-environment-id}/api/v2/metrics/ingest" +# url = "" +# +# ## Your Dynatrace API token. +# ## Create an API token within your Dynatrace environment, by navigating to Settings > Integration > Dynatrace API +# ## The API token needs data ingest scope permission. When using OneAgent, no API token is required. +# api_token = "" +# +# ## Optional prefix for metric names (e.g.: "telegraf") +# prefix = "telegraf" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Optional flag for ignoring tls certificate check +# # insecure_skip_verify = false +# +# +# ## Connection timeout, defaults to "5s" if not set. 
+# timeout = "5s" +# +# ## If you want metrics to be treated and reported as delta counters, add the metric names here +# additional_counters = [ ] +# +# ## Optional dimensions to be added to every metric +# # [outputs.dynatrace.default_dimensions] +# # default_key = "default value" + + +# # Configuration for Elasticsearch to send metrics to. +# [[outputs.elasticsearch]] +# ## The full HTTP endpoint URL for your Elasticsearch instance +# ## Multiple urls can be specified as part of the same cluster, +# ## this means that only ONE of the urls will be written to each interval. +# urls = [ "http://node1.es.example.com:9200" ] # required. +# ## Elasticsearch client timeout, defaults to "5s" if not set. +# timeout = "5s" +# ## Set to true to ask Elasticsearch a list of all cluster nodes, +# ## thus it is not necessary to list all nodes in the urls config option. +# enable_sniffer = false +# ## Set to true to enable gzip compression +# enable_gzip = false +# ## Set the interval to check if the Elasticsearch nodes are available +# ## Setting to "0s" will disable the health check (not recommended in production) +# health_check_interval = "10s" +# ## HTTP basic authentication details +# # username = "telegraf" +# # password = "mypassword" +# +# ## Index Config +# ## The target index for metrics (Elasticsearch will create if it not exists). +# ## You can use the date specifiers below to create indexes per time frame. +# ## The metric timestamp will be used to decide the destination index name +# # %Y - year (2016) +# # %y - last two digits of year (00..99) +# # %m - month (01..12) +# # %d - day of month (e.g., 01) +# # %H - hour (00..23) +# # %V - week of the year (ISO week) (01..53) +# ## Additionally, you can specify a tag name using the notation {{tag_name}} +# ## which will be used as part of the index name. If the tag does not exist, +# ## the default tag value will be used. 
+# # index_name = "telegraf-{{host}}-%Y.%m.%d"
+# # default_tag_value = "none"
+# index_name = "telegraf-%Y.%m.%d" # required.
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Template Config
+# ## Set to true if you want telegraf to manage its index template.
+# ## If enabled it will create a recommended index template for telegraf indexes
+# manage_template = true
+# ## The template name used for telegraf indexes
+# template_name = "telegraf"
+# ## Set to true if you want telegraf to overwrite an existing template
+# overwrite_template = false
+# ## If set to true a unique ID hash will be sent as sha256(concat(timestamp,measurement,series-hash)) string
+# ## it will enable data resend and update metric points avoiding duplicated metrics with different IDs
+# force_document_id = false
+
+
+# # Send metrics to command as input over stdin
+# [[outputs.exec]]
+# ## Command to ingest metrics via stdin.
+# command = ["tee", "-a", "/dev/null"]
+#
+# ## Timeout for command to complete.
+# # timeout = "5s"
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# # data_format = "influx"
+
+
+# # Run executable as long-running output plugin
+# [[outputs.execd]]
+# ## Program to run as daemon
+# command = ["my-telegraf-output", "--some-flag", "value"]
+#
+# ## Delay before the process is restarted after an unexpected termination
+# restart_delay = "10s"
+#
+# ## Data format to export.
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Send telegraf metrics to file(s) +# [[outputs.file]] +# ## Files to write to, "stdout" is a specially handled file. +# files = ["stdout", "/tmp/metrics.out"] +# +# ## Use batch serialization format instead of line based delimiting. The +# ## batch format allows for the production of non line based output formats and +# ## may more efficiently encode metric groups. +# # use_batch_format = false +# +# ## The file will be rotated after the time interval specified. When set +# ## to 0 no time based rotation is performed. +# # rotation_interval = "0d" +# +# ## The logfile will be rotated when it becomes larger than the specified +# ## size. When set to 0 no size based rotation is performed. +# # rotation_max_size = "0MB" +# +# ## Maximum number of rotated archives to keep, any older logs are deleted. +# ## If set to -1, no archives are removed. +# # rotation_max_archives = 5 +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Configuration for Graphite server to send metrics to +# [[outputs.graphite]] +# ## TCP endpoint for your graphite instance. +# ## If multiple endpoints are configured, output will be load balanced. +# ## Only one of the endpoints will be written to with each iteration. 
+# servers = ["localhost:2003"]
+# ## Prefix metrics name
+# prefix = ""
+# ## Graphite output template
+# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# template = "host.tags.measurement.field"
+#
+# ## Enable Graphite tags support
+# # graphite_tag_support = false
+#
+# ## Define how metric names and tags are sanitized; options are "strict", or "compatible"
+# ## strict - Default method, and backwards compatible with previous versions of Telegraf
+# ## compatible - More relaxed sanitizing when using tags, and compatible with the graphite spec
+# # graphite_tag_sanitize_mode = "strict"
+#
+# ## Character for separating metric name and field for Graphite tags
+# # graphite_separator = "."
+#
+# ## Graphite templates patterns
+# ## 1. Template for cpu
+# ## 2. Template for disk*
+# ## 3. Default template
+# # templates = [
+# # "cpu tags.measurement.host.field",
+# # "disk* measurement.field",
+# # "host.measurement.tags.field"
+# #]
+#
+# ## timeout in seconds for the write connection to graphite
+# timeout = 2
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Send telegraf metrics to graylog
+# [[outputs.graylog]]
+# ## Endpoints for your graylog instances.
+# servers = ["udp://127.0.0.1:12201"]
+#
+# ## Connection timeout.
+# # timeout = "5s"
+#
+# ## The field to use as the GELF short_message, if unset the static string
+# ## "telegraf" will be used.
+# ## example: short_message_field = "message"
+# # short_message_field = ""
+
+
+# # Configurable HTTP health check resource based on metrics
+# [[outputs.health]]
+# ## Address and port to listen on.
+# ## ex: service_address = "http://localhost:8080" +# ## service_address = "unix:///var/run/telegraf-health.sock" +# # service_address = "http://:8080" +# +# ## The maximum duration for reading the entire request. +# # read_timeout = "5s" +# ## The maximum duration for writing the entire response. +# # write_timeout = "5s" +# +# ## Username and password to accept for HTTP basic authentication. +# # basic_username = "user1" +# # basic_password = "secret" +# +# ## Allowed CA certificates for client certificates. +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## TLS server certificate and private key. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## One or more check sub-tables should be defined, it is also recommended to +# ## use metric filtering to limit the metrics that flow into this output. +# ## +# ## When using the default buffer sizes, this example will fail when the +# ## metric buffer is half full. +# ## +# ## namepass = ["internal_write"] +# ## tagpass = { output = ["influxdb"] } +# ## +# ## [[outputs.health.compares]] +# ## field = "buffer_size" +# ## lt = 5000.0 +# ## +# ## [[outputs.health.contains]] +# ## field = "buffer_size" + + +# # A plugin that can transmit metrics over HTTP +# [[outputs.http]] +# ## URL is the address to send metrics to +# url = "http://127.0.0.1:8080/telegraf" +# +# ## Timeout for HTTP message +# # timeout = "5s" +# +# ## HTTP method, one of: "POST" or "PUT" +# # method = "POST" +# +# ## HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## OAuth2 Client Credentials Grant +# # client_id = "clientid" +# # client_secret = "secret" +# # token_url = "https://indentityprovider/oauth2/v1/token" +# # scopes = ["urn:opc:idm:__myscopes__"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # 
insecure_skip_verify = false +# +# ## Optional Cookie authentication +# # cookie_auth_url = "https://localhost/authMe" +# # cookie_auth_method = "POST" +# # cookie_auth_username = "username" +# # cookie_auth_password = "pa$$word" +# # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' +# ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie +# # cookie_auth_renewal = "5m" +# +# ## Data format to output. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" +# +# ## HTTP Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "identity" +# +# ## Additional HTTP headers +# # [outputs.http.headers] +# # # Should be set manually to "application/json" for json data_format +# # Content-Type = "text/plain; charset=utf-8" +# +# ## Idle (keep-alive) connection timeout. +# ## Maximum amount of time before idle connection is closed. +# ## Zero means no limit. +# # idle_conn_timeout = 0 + + # # Configuration for sending metrics to InfluxDB # [[outputs.influxdb_v2]] # ## The URLs of the InfluxDB cluster nodes. @@ -190,7 +879,7 @@ # ## Multiple URLs can be specified for a single cluster, only ONE of the # ## urls will be written to each interval. # ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] -# urls = ["http://127.0.0.1:9999"] +# urls = ["http://127.0.0.1:8086"] # # ## Token for authentication. 
# token = "" @@ -236,188 +925,7970 @@ # # insecure_skip_verify = false -############################################################################### -# INPUT PLUGINS # -############################################################################### +# # Configuration for sending metrics to an Instrumental project +# [[outputs.instrumental]] +# ## Project API Token (required) +# api_token = "API Token" # required +# ## Prefix the metrics with a given name +# prefix = "" +# ## Stats output template (Graphite formatting) +# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite +# template = "host.tags.measurement.field" +# ## Timeout in seconds to connect +# timeout = "2s" +# ## Display Communication to Instrumental +# debug = false -# Windows Performance Counters plugin. -# These are the recommended method of monitoring system metrics on windows, -# as the regular system plugins (inputs.cpu, inputs.mem, etc.) rely on WMI, -# which utilize more system resources. -# -# See more configuration examples at: -# https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters - -[[inputs.win_perf_counters]] - [[inputs.win_perf_counters.object]] - # Processor usage, alternative to native, reports on a per core. - ObjectName = "Processor" - Instances = ["*"] - Counters = [ - "% Idle Time", - "% Interrupt Time", - "% Privileged Time", - "% User Time", - "% Processor Time", - "% DPC Time", - ] - Measurement = "win_cpu" - # Set to true to include _Total instance when querying for all (*). - IncludeTotal=true - - [[inputs.win_perf_counters.object]] - # Disk times and queues - ObjectName = "LogicalDisk" - Instances = ["*"] - Counters = [ - "% Idle Time", - "% Disk Time", - "% Disk Read Time", - "% Disk Write Time", - "% Free Space", - "Current Disk Queue Length", - "Free Megabytes", - ] - Measurement = "win_disk" - # Set to true to include _Total instance when querying for all (*). 
- #IncludeTotal=false - - [[inputs.win_perf_counters.object]] - ObjectName = "PhysicalDisk" - Instances = ["*"] - Counters = [ - "Disk Read Bytes/sec", - "Disk Write Bytes/sec", - "Current Disk Queue Length", - "Disk Reads/sec", - "Disk Writes/sec", - "% Disk Time", - "% Disk Read Time", - "% Disk Write Time", - ] - Measurement = "win_diskio" - - [[inputs.win_perf_counters.object]] - ObjectName = "Network Interface" - Instances = ["*"] - Counters = [ - "Bytes Received/sec", - "Bytes Sent/sec", - "Packets Received/sec", - "Packets Sent/sec", - "Packets Received Discarded", - "Packets Outbound Discarded", - "Packets Received Errors", - "Packets Outbound Errors", - ] - Measurement = "win_net" - - [[inputs.win_perf_counters.object]] - ObjectName = "System" - Counters = [ - "Context Switches/sec", - "System Calls/sec", - "Processor Queue Length", - "System Up Time", - ] - Instances = ["------"] - Measurement = "win_system" - # Set to true to include _Total instance when querying for all (*). - #IncludeTotal=false - - [[inputs.win_perf_counters.object]] - # Example query where the Instance portion must be removed to get data back, - # such as from the Memory object. - ObjectName = "Memory" - Counters = [ - "Available Bytes", - "Cache Faults/sec", - "Demand Zero Faults/sec", - "Page Faults/sec", - "Pages/sec", - "Transition Faults/sec", - "Pool Nonpaged Bytes", - "Pool Paged Bytes", - "Standby Cache Reserve Bytes", - "Standby Cache Normal Priority Bytes", - "Standby Cache Core Bytes", - ] - # Use 6 x - to remove the Instance bit from the query. - Instances = ["------"] - Measurement = "win_mem" - # Set to true to include _Total instance when querying for all (*). - #IncludeTotal=false - - [[inputs.win_perf_counters.object]] - # Example query where the Instance portion must be removed to get data back, - # such as from the Paging File object. 
- ObjectName = "Paging File" - Counters = [ - "% Usage", - ] - Instances = ["_Total"] - Measurement = "win_swap" - - -# Windows system plugins using WMI (disabled by default, using -# win_perf_counters over WMI is recommended) - - -# # Read metrics about cpu usage -# [[inputs.cpu]] -# ## Whether to report per-cpu stats or not -# percpu = true -# ## Whether to report total system cpu stats or not -# totalcpu = true -# ## If true, collect raw CPU time metrics. -# collect_cpu_time = false -# ## If true, compute and report the sum of all non-idle CPU states. -# report_active = false - - -# # Read metrics about disk usage by mount point -# [[inputs.disk]] -# ## By default stats will be gathered for all mount points. -# ## Set mount_points will restrict the stats to only the specified mount points. -# # mount_points = ["/"] -# -# ## Ignore mount points by filesystem type. -# ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"] - - -# # Read metrics about disk IO by device -# [[inputs.diskio]] -# ## By default, telegraf will gather stats for all devices including -# ## disk partitions. -# ## Setting devices will restrict the stats to the specified devices. -# # devices = ["sda", "sdb", "vd*"] -# ## Uncomment the following line if you need disk serial numbers. -# # skip_serial_number = false -# # -# ## On systems which support it, device metadata can be added in the form of -# ## tags. -# ## Currently only Linux is supported via udev properties. You can view -# ## available properties for a device by running: -# ## 'udevadm info -q property -n /dev/sda' -# # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] -# # -# ## Using the same metadata source as device_tags, you can also customize the -# ## name of the device via templates. -# ## The 'name_templates' parameter is a list of templates to try and apply to -# ## the device. The template may contain variables in the form of '$PROPERTY' or -# ## '${PROPERTY}'. 
The first template which does not contain any variables not -# ## present for the device is used as the device name tag. -# ## The typical use case is for LVM volumes, to get the VG/LV name instead of -# ## the near-meaningless DM-0 name. -# # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] - - -# # Read metrics about memory usage -# [[inputs.mem]] -# # no configuration +# # Configuration for the Kafka server to send metrics to +# [[outputs.kafka]] +# ## URLs of kafka brokers +# brokers = ["localhost:9092"] +# ## Kafka topic for producer messages +# topic = "telegraf" +# +# ## The value of this tag will be used as the topic. If not set the 'topic' +# ## option is used. +# # topic_tag = "" +# +# ## If true, the 'topic_tag' will be removed from to the metric. +# # exclude_topic_tag = false +# +# ## Optional Client id +# # client_id = "Telegraf" +# +# ## Set the minimal supported Kafka version. Setting this enables the use of new +# ## Kafka features and APIs. Of particular interest, lz4 compression +# ## requires at least version 0.10.0.0. +# ## ex: version = "1.1.0" +# # version = "" +# +# ## Optional topic suffix configuration. +# ## If the section is omitted, no suffix is used. +# ## Following topic suffix methods are supported: +# ## measurement - suffix equals to separator + measurement's name +# ## tags - suffix equals to separator + specified tags' values +# ## interleaved with separator +# +# ## Suffix equals to "_" + measurement name +# # [outputs.kafka.topic_suffix] +# # method = "measurement" +# # separator = "_" +# +# ## Suffix equals to "__" + measurement's "foo" tag value. +# ## If there's no such a tag, suffix equals to an empty string +# # [outputs.kafka.topic_suffix] +# # method = "tags" +# # keys = ["foo"] +# # separator = "__" +# +# ## Suffix equals to "_" + measurement's "foo" and "bar" +# ## tag values, separated by "_". If there is no such tags, +# ## their values treated as empty strings. 
+# # [outputs.kafka.topic_suffix]
+# # method = "tags"
+# # keys = ["foo", "bar"]
+# # separator = "_"
+#
+# ## The routing tag specifies a tagkey on the metric whose value is used as
+# ## the message key. The message key is used to determine which partition to
+# ## send the message to. This tag is preferred over the routing_key option.
+# routing_tag = "host"
+#
+# ## The routing key is set as the message key and used to determine which
+# ## partition to send the message to. This value is only used when no
+# ## routing_tag is set or as a fallback when the tag specified in routing tag
+# ## is not found.
+# ##
+# ## If set to "random", a random value will be generated for each message.
+# ##
+# ## When unset, no message key is added and each message is routed to a random
+# ## partition.
+# ##
+# ## ex: routing_key = "random"
+# ## routing_key = "telegraf"
+# # routing_key = ""
+#
+# ## Compression codec represents the various compression codecs recognized by
+# ## Kafka in messages.
+# ## 0 : None
+# ## 1 : Gzip
+# ## 2 : Snappy
+# ## 3 : LZ4
+# ## 4 : ZSTD
+# # compression_codec = 0
+#
+# ## Idempotent Writes
+# ## If enabled, exactly one copy of each message is written.
+# # idempotent_writes = false
+#
+# ## RequiredAcks is used in Produce Requests to tell the broker how many
+# ## replica acknowledgements it must see before responding
+# ## 0 : the producer never waits for an acknowledgement from the broker.
+# ## This option provides the lowest latency but the weakest durability
+# ## guarantees (some data will be lost when a server fails).
+# ## 1 : the producer gets an acknowledgement after the leader replica has
+# ## received the data. This option provides better durability as the
+# ## client waits until the server acknowledges the request as successful
+# ## (only messages that were written to the now-dead leader but not yet
+# ## replicated will be lost).
+# ## -1: the producer gets an acknowledgement after all in-sync replicas have +# ## received the data. This option provides the best durability, we +# ## guarantee that no messages will be lost as long as at least one in +# ## sync replica remains. +# # required_acks = -1 +# +# ## The maximum number of times to retry sending a metric before failing +# ## until the next flush. +# # max_retry = 3 +# +# ## The maximum permitted size of a message. Should be set equal to or +# ## smaller than the broker's 'message.max.bytes'. +# # max_message_bytes = 1000000 +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional SASL Config +# # sasl_username = "kafka" +# # sasl_password = "secret" +# +# ## Optional SASL: +# ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI +# ## (defaults to PLAIN) +# # sasl_mechanism = "" +# +# ## used if sasl_mechanism is GSSAPI (experimental) +# # sasl_gssapi_service_name = "" +# # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH +# # sasl_gssapi_auth_type = "KRB5_USER_AUTH" +# # sasl_gssapi_kerberos_config_path = "/" +# # sasl_gssapi_realm = "realm" +# # sasl_gssapi_key_tab_path = "" +# # sasl_gssapi_disable_pafxfast = false +# +# ## used if sasl_mechanism is OAUTHBEARER (experimental) +# # sasl_access_token = "" +# +# ## SASL protocol version. When connecting to Azure EventHub set to 0. +# # sasl_version = 1 +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" -# # Read metrics about swap memory usage -# [[inputs.swap]] -# # no configuration +# # Configuration for the AWS Kinesis output. 
+# [[outputs.kinesis]] +# ## Amazon REGION of kinesis endpoint. +# region = "ap-southeast-2" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Kinesis StreamName must exist prior to starting telegraf. +# streamname = "StreamName" +# ## DEPRECATED: PartitionKey as used for sharding data. +# partitionkey = "PartitionKey" +# ## DEPRECATED: If set the partitionKey will be a random UUID on every put. +# ## This allows for scaling across multiple shards in a stream. +# ## This will cause issues with ordering. +# use_random_partitionkey = false +# ## The partition key can be calculated using one of several methods: +# ## +# ## Use a static value for all writes: +# # [outputs.kinesis.partition] +# # method = "static" +# # key = "howdy" +# # +# ## Use a random partition key on each write: +# # [outputs.kinesis.partition] +# # method = "random" +# # +# ## Use the measurement name as the partition key: +# # [outputs.kinesis.partition] +# # method = "measurement" +# # +# ## Use the value of a tag for all writes, if the tag is not set the empty +# ## default option will be used. 
When no default, defaults to "telegraf" +# # [outputs.kinesis.partition] +# # method = "tag" +# # key = "host" +# # default = "mykey" +# +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" +# +# ## debug will show upstream aws messages. +# debug = false + + +# # Configuration for Librato API to send metrics to. +# [[outputs.librato]] +# ## Librato API Docs +# ## http://dev.librato.com/v1/metrics-authentication +# ## Librato API user +# api_user = "telegraf@influxdb.com" # required. +# ## Librato API token +# api_token = "my-secret-token" # required. +# ## Debug +# # debug = false +# ## Connection timeout. +# # timeout = "5s" +# ## Output source Template (same as graphite buckets) +# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite +# ## This template is used in librato's source (not metric's name) +# template = "host" +# + + +# # Send aggregate metrics to Logz.io +# [[outputs.logzio]] +# ## Connection timeout, defaults to "5s" if not set. +# timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Logz.io account token +# token = "your logz.io token" # required +# +# ## Use your listener URL for your Logz.io account region. +# # url = "https://listener.logz.io:8071" + + +# # Send logs to Loki +# [[outputs.loki]] +# ## The domain of Loki +# domain = "https://loki.domain.tld" +# +# ## Endpoint to write api +# # endpoint = "/loki/api/v1/push" +# +# ## Connection timeout, defaults to "5s" if not set. 
+# # timeout = "5s" +# +# ## Basic auth credential +# # username = "loki" +# # password = "pass" +# +# ## Additional HTTP headers +# # http_headers = {"X-Scope-OrgID" = "1"} +# +# ## If the request must be gzip encoded +# # gzip_request = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" + + +# # Configuration for MQTT server to send metrics to +# [[outputs.mqtt]] +# servers = ["localhost:1883"] # required. +# +# ## MQTT outputs send metrics to this topic format +# ## "///" +# ## ex: prefix/web01.example.com/mem +# topic_prefix = "telegraf" +# +# ## QoS policy for messages +# ## 0 = at most once +# ## 1 = at least once +# ## 2 = exactly once +# # qos = 2 +# +# ## username and password to connect MQTT server. +# # username = "telegraf" +# # password = "metricsmetricsmetricsmetrics" +# +# ## client ID, if not set a random ID is generated +# # client_id = "" +# +# ## Timeout for write operations. default: 5s +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## When true, metrics will be sent in one MQTT message per flush. Otherwise, +# ## metrics are written one metric per MQTT message. +# # batch = false +# +# ## When true, metric will have RETAIN flag set, making broker cache entries until someone +# ## actually reads it +# # retain = false +# +# ## Defines the maximum length of time that the broker and client may not communicate. +# ## Defaults to 0 which turns the feature off. For version v2.0.12 of eclipse/mosquitto there is a +# ## [bug](https://github.com/eclipse/mosquitto/issues/2117) which requires keep_alive to be set. +# ## As a reference eclipse/paho.mqtt.golang v1.3.0 defaults to 30. +# # keep_alive = 0 +# +# ## Data format to output. 
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# data_format = "influx"
+
+
+# # Send telegraf measurements to NATS
+# [[outputs.nats]]
+# ## URLs of NATS servers
+# servers = ["nats://localhost:4222"]
+#
+# ## Optional client name
+# # name = ""
+#
+# ## Optional credentials
+# # username = ""
+# # password = ""
+#
+# ## Optional NATS 2.0 and NATS NGS compatible user credentials
+# # credentials = "/etc/telegraf/nats.creds"
+#
+# ## NATS subject for producer messages
+# subject = "telegraf"
+#
+# ## Use Transport Layer Security
+# # secure = false
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# data_format = "influx"
+
+
+# # Send metrics to New Relic metrics endpoint
+# [[outputs.newrelic]]
+# ## New Relic Insights API key
+# insights_key = "insights api key"
+#
+# ## Prefix to add to metric name for easy identification.
+# # metric_prefix = ""
+#
+# ## Timeout for writes to the New Relic API.
+# # timeout = "15s"
+#
+# ## HTTP Proxy override. If unset use values from the standard
+# ## proxy environment variables to determine proxy, if any.
+# # http_proxy = "http://corporate.proxy:3128"
+#
+# ## Metric URL override to enable geographic location endpoints.
+# # If not set use values from the standard +# # metric_url = "https://metric-api.newrelic.com/metric/v1" + + +# # Send telegraf measurements to NSQD +# [[outputs.nsq]] +# ## Location of nsqd instance listening on TCP +# server = "localhost:4150" +# ## NSQ topic for producer messages +# topic = "telegraf" +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Send OpenTelemetry metrics over gRPC +# [[outputs.opentelemetry]] +# ## Override the default (localhost:4317) OpenTelemetry gRPC service +# ## address:port +# # service_address = "localhost:4317" +# +# ## Override the default (5s) request timeout +# # timeout = "5s" +# +# ## Optional TLS Config. +# ## +# ## Root certificates for verifying server certificates encoded in PEM format. +# # tls_ca = "/etc/telegraf/ca.pem" +# ## The public and private keypairs for the client encoded in PEM format. +# ## May contain intermediate certificates. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS, but skip TLS chain and host verification. +# # insecure_skip_verify = false +# ## Send the specified TLS server name via SNI. +# # tls_server_name = "foo.example.com" +# +# ## Override the default (gzip) compression used to send data. +# ## Supports: "gzip", "none" +# # compression = "gzip" +# +# ## Additional OpenTelemetry resource attributes +# # [outputs.opentelemetry.attributes] +# # "service.name" = "demo" +# +# ## Additional gRPC request metadata +# # [outputs.opentelemetry.headers] +# # key1 = "value1" + + +# # Configuration for OpenTSDB server to send metrics to +# [[outputs.opentsdb]] +# ## prefix for metrics keys +# prefix = "my.specific.prefix." 
+# +# ## DNS name of the OpenTSDB server +# ## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the +# ## telnet API. "http://opentsdb.example.com" will use the Http API. +# host = "opentsdb.example.com" +# +# ## Port of the OpenTSDB server +# port = 4242 +# +# ## Number of data points to send to OpenTSDB in Http requests. +# ## Not used with telnet API. +# http_batch_size = 50 +# +# ## URI Path for Http requests to OpenTSDB. +# ## Used in cases where OpenTSDB is located behind a reverse proxy. +# http_path = "/api/put" +# +# ## Debug true - Prints OpenTSDB communication +# debug = false +# +# ## Separator separates measurement name from field +# separator = "_" + + +# # Configuration for the Prometheus client to spawn +# [[outputs.prometheus_client]] +# ## Address to listen on +# listen = ":9273" +# +# ## Metric version controls the mapping from Telegraf metrics into +# ## Prometheus format. When using the prometheus input, use the same value in +# ## both plugins to ensure metrics are round-tripped without modification. +# ## +# ## example: metric_version = 1; +# ## metric_version = 2; recommended version +# # metric_version = 1 +# +# ## Use HTTP Basic Authentication. +# # basic_username = "Foo" +# # basic_password = "Bar" +# +# ## If set, the IP Ranges which are allowed to access metrics. +# ## ex: ip_range = ["192.168.0.0/24", "192.168.1.0/30"] +# # ip_range = [] +# +# ## Path to publish the metrics on. +# # path = "/metrics" +# +# ## Expiration interval for each metric. 0 == no expiration +# # expiration_interval = "60s" +# +# ## Collectors to enable, valid entries are "gocollector" and "process". +# ## If unset, both are enabled. +# # collectors_exclude = ["gocollector", "process"] +# +# ## Send string metrics as Prometheus labels. +# ## Unless set to false all string metrics will be sent as labels. +# # string_as_label = true +# +# ## If set, enable TLS with the given certificate. 
+# # tls_cert = "/etc/ssl/telegraf.crt" +# # tls_key = "/etc/ssl/telegraf.key" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Export metric collection time. +# # export_timestamp = false + + +# # Configuration for the Riemann server to send metrics to +# [[outputs.riemann]] +# ## The full TCP or UDP URL of the Riemann server +# url = "tcp://localhost:5555" +# +# ## Riemann event TTL, floating-point time in seconds. +# ## Defines how long that an event is considered valid for in Riemann +# # ttl = 30.0 +# +# ## Separator to use between measurement and field name in Riemann service name +# ## This does not have any effect if 'measurement_as_attribute' is set to 'true' +# separator = "/" +# +# ## Set measurement name as Riemann attribute 'measurement', instead of prepending it to the Riemann service name +# # measurement_as_attribute = false +# +# ## Send string metrics as Riemann event states. +# ## Unless enabled all string metrics will be ignored +# # string_as_state = false +# +# ## A list of tag keys whose values get sent as Riemann tags. +# ## If empty, all Telegraf tag values will be sent as tags +# # tag_keys = ["telegraf","custom_tag"] +# +# ## Additional Riemann tags to send. +# # tags = ["telegraf-output"] +# +# ## Description for Riemann event +# # description_text = "metrics collected from telegraf" +# +# ## Riemann client write timeout, defaults to "5s" if not set. 
+# # timeout = "5s" + + +# # Configuration for the Riemann server to send metrics to +# [[outputs.riemann_legacy]] +# ## URL of server +# url = "localhost:5555" +# ## transport protocol to use either tcp or udp +# transport = "tcp" +# ## separator to use between input name and field name in Riemann service name +# separator = " " + + +# # Send aggregate metrics to Sensu Monitor +# [[outputs.sensu]] +# ## BACKEND API URL is the Sensu Backend API root URL to send metrics to +# ## (protocol, host, and port only). The output plugin will automatically +# ## append the corresponding backend API path +# ## /api/core/v2/namespaces/:entity_namespace/events/:entity_name/:check_name). +# ## +# ## Backend Events API reference: +# ## https://docs.sensu.io/sensu-go/latest/api/events/ +# ## +# ## AGENT API URL is the Sensu Agent API root URL to send metrics to +# ## (protocol, host, and port only). The output plugin will automatically +# ## append the correspeonding agent API path (/events). +# ## +# ## Agent API Events API reference: +# ## https://docs.sensu.io/sensu-go/latest/api/events/ +# ## +# ## NOTE: if backend_api_url and agent_api_url and api_key are set, the output +# ## plugin will use backend_api_url. 
If backend_api_url and agent_api_url are +# ## not provided, the output plugin will default to use an agent_api_url of +# ## http://127.0.0.1:3031 +# ## +# # backend_api_url = "http://127.0.0.1:8080" +# # agent_api_url = "http://127.0.0.1:3031" +# +# ## API KEY is the Sensu Backend API token +# ## Generate a new API token via: +# ## +# ## $ sensuctl cluster-role create telegraf --verb create --resource events,entities +# ## $ sensuctl cluster-role-binding create telegraf --cluster-role telegraf --group telegraf +# ## $ sensuctl user create telegraf --group telegraf --password REDACTED +# ## $ sensuctl api-key grant telegraf +# ## +# ## For more information on Sensu RBAC profiles & API tokens, please visit: +# ## - https://docs.sensu.io/sensu-go/latest/reference/rbac/ +# ## - https://docs.sensu.io/sensu-go/latest/reference/apikeys/ +# ## +# # api_key = "${SENSU_API_KEY}" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Timeout for HTTP message +# # timeout = "5s" +# +# ## HTTP Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "identity" +# +# ## Sensu Event details +# ## +# ## Below are the event details to be sent to Sensu. The main portions of the +# ## event are the check, entity, and metrics specifications. 
For more information
+# ## on Sensu events and its components, please visit:
+# ## - Events - https://docs.sensu.io/sensu-go/latest/reference/events
+# ## - Checks - https://docs.sensu.io/sensu-go/latest/reference/checks
+# ## - Entities - https://docs.sensu.io/sensu-go/latest/reference/entities
+# ## - Metrics - https://docs.sensu.io/sensu-go/latest/reference/events#metrics
+# ##
+# ## Check specification
+# ## The check name is the name to give the Sensu check associated with the event
+# ## created. This maps to check.metadata.name in the event.
+# [outputs.sensu.check]
+# name = "telegraf"
+#
+# ## Entity specification
+# ## Configure the entity name and namespace, if necessary. This will be part of
+# ## the entity.metadata in the event.
+# ##
+# ## NOTE: if the output plugin is configured to send events to a
+# ## backend_api_url and entity_name is not set, the value returned by
+# ## os.Hostname() will be used; if the output plugin is configured to send
+# ## events to an agent_api_url, entity_name and entity_namespace are not used.
+# # [outputs.sensu.entity]
+# # name = "server-01"
+# # namespace = "default"
+#
+# ## Metrics specification
+# ## Configure the tags for the metrics that are sent as part of the Sensu event
+# # [outputs.sensu.tags]
+# # source = "telegraf"
+#
+# ## Configure the handler(s) for processing the provided metrics
+# # [outputs.sensu.metrics]
+# # handlers = ["influxdb","elasticsearch"]
+
+
+# # Send metrics and events to SignalFx
+# [[outputs.signalfx]]
+# ## SignalFx Org Access Token
+# access_token = "my-secret-token"
+#
+# ## The SignalFx realm that your organization resides in
+# signalfx_realm = "us9" # Required if ingest_url is not set
+#
+# ## You can optionally provide a custom ingest url instead of the
+# ## signalfx_realm option above if you are using a gateway or proxy
+# ## instance. This option takes precedence over signalfx_realm.
+# ingest_url = "https://my-custom-ingest/" +# +# ## Event typed metrics are omitted by default, +# ## If you require an event typed metric you must specify the +# ## metric name in the following list. +# included_event_names = ["plugin.metric_name"] + + +# # Generic socket writer capable of handling multiple socket types. +# [[outputs.socket_writer]] +# ## URL to connect to +# # address = "tcp://127.0.0.1:8094" +# # address = "tcp://example.com:http" +# # address = "tcp4://127.0.0.1:8094" +# # address = "tcp6://127.0.0.1:8094" +# # address = "tcp6://[2001:db8::1]:8094" +# # address = "udp://127.0.0.1:8094" +# # address = "udp4://127.0.0.1:8094" +# # address = "udp6://127.0.0.1:8094" +# # address = "unix:///tmp/telegraf.sock" +# # address = "unixgram:///tmp/telegraf.sock" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Period between keep alive probes. +# ## Only applies to TCP sockets. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration. +# # keep_alive_period = "5m" +# +# ## Content encoding for packet-based connections (i.e. UDP, unixgram). +# ## Can be set to "gzip" or to "identity" to apply no encoding. +# ## +# # content_encoding = "identity" +# +# ## Data format to generate. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# # data_format = "influx" + + +# # Send metrics to SQL Database +# [[outputs.sql]] +# ## Database driver +# ## Valid options: mssql (Microsoft SQL Server), mysql (MySQL), pgx (Postgres), +# ## sqlite (SQLite3), snowflake (snowflake.com) +# # driver = "" +# +# ## Data source name +# ## The format of the data source name is different for each database driver. 
+# ## See the plugin readme for details. +# # data_source_name = "" +# +# ## Timestamp column name +# # timestamp_column = "timestamp" +# +# ## Table creation template +# ## Available template variables: +# ## {TABLE} - table name as a quoted identifier +# ## {TABLELITERAL} - table name as a quoted string literal +# ## {COLUMNS} - column definitions (list of quoted identifiers and types) +# # table_template = "CREATE TABLE {TABLE}({COLUMNS})" +# +# ## Table existence check template +# ## Available template variables: +# ## {TABLE} - tablename as a quoted identifier +# # table_exists_template = "SELECT 1 FROM {TABLE} LIMIT 1" +# +# ## Initialization SQL +# # init_sql = "" +# +# ## Metric type to SQL type conversion +# #[outputs.sql.convert] +# # integer = "INT" +# # real = "DOUBLE" +# # text = "TEXT" +# # timestamp = "TIMESTAMP" +# # defaultvalue = "TEXT" +# # unsigned = "UNSIGNED" + + +# # Configuration for Google Cloud Stackdriver to send metrics to +# [[outputs.stackdriver]] +# ## GCP Project +# project = "erudite-bloom-151019" +# +# ## The namespace for the metric descriptor +# namespace = "telegraf" +# +# ## Custom resource type +# # resource_type = "generic_node" +# +# ## Additional resource labels +# # [outputs.stackdriver.resource_labels] +# # node_id = "$HOSTNAME" +# # namespace = "myapp" +# # location = "eu-north0" + + +# # A plugin that can transmit metrics to Sumo Logic HTTP Source +# [[outputs.sumologic]] +# ## Unique URL generated for your HTTP Metrics Source. +# ## This is the address to send metrics to. +# # url = "https://events.sumologic.net/receiver/v1/http/" +# +# ## Data format to be used for sending metrics. +# ## This will set the "Content-Type" header accordingly. 
+# ## Currently supported formats: +# ## * graphite - for Content-Type of application/vnd.sumologic.graphite +# ## * carbon2 - for Content-Type of application/vnd.sumologic.carbon2 +# ## * prometheus - for Content-Type of application/vnd.sumologic.prometheus +# ## +# ## More information can be found at: +# ## https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source#content-type-headers-for-metrics +# ## +# ## NOTE: +# ## When unset, telegraf will by default use the influx serializer which is currently unsupported +# ## in HTTP Source. +# data_format = "carbon2" +# +# ## Timeout used for HTTP request +# # timeout = "5s" +# +# ## Max HTTP request body size in bytes before compression (if applied). +# ## By default 1MB is recommended. +# ## NOTE: +# ## Bear in mind that in some serializer a metric even though serialized to multiple +# ## lines cannot be split any further so setting this very low might not work +# ## as expected. +# # max_request_body_size = 1000000 +# +# ## Additional, Sumo specific options. +# ## Full list can be found here: +# ## https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source#supported-http-headers +# +# ## Desired source name. +# ## Useful if you want to override the source name configured for the source. +# # source_name = "" +# +# ## Desired host name. +# ## Useful if you want to override the source host configured for the source. +# # source_host = "" +# +# ## Desired source category. +# ## Useful if you want to override the source category configured for the source. +# # source_category = "" +# +# ## Comma-separated key=value list of dimensions to apply to every metric. +# ## Custom dimensions will allow you to query your metrics at a more granular level. 
+# # dimensions = "" + + +# # Configuration for Syslog server to send metrics to +# [[outputs.syslog]] +# ## URL to connect to +# ## ex: address = "tcp://127.0.0.1:8094" +# ## ex: address = "tcp4://127.0.0.1:8094" +# ## ex: address = "tcp6://127.0.0.1:8094" +# ## ex: address = "tcp6://[2001:db8::1]:8094" +# ## ex: address = "udp://127.0.0.1:8094" +# ## ex: address = "udp4://127.0.0.1:8094" +# ## ex: address = "udp6://127.0.0.1:8094" +# address = "tcp://127.0.0.1:8094" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Period between keep alive probes. +# ## Only applies to TCP sockets. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration. +# # keep_alive_period = "5m" +# +# ## The framing technique with which it is expected that messages are +# ## transported (default = "octet-counting"). Whether the messages come +# ## using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), +# ## or the non-transparent framing technique (RFC6587#section-3.4.2). Must +# ## be one of "octet-counting", "non-transparent". +# # framing = "octet-counting" +# +# ## The trailer to be expected in case of non-transparent framing (default = "LF"). +# ## Must be one of "LF", or "NUL". +# # trailer = "LF" +# +# ## SD-PARAMs settings +# ## Syslog messages can contain key/value pairs within zero or more +# ## structured data sections. For each unrecognized metric tag/field a +# ## SD-PARAMS is created. 
+# ## +# ## Example: +# ## [[outputs.syslog]] +# ## sdparam_separator = "_" +# ## default_sdid = "default@32473" +# ## sdids = ["foo@123", "bar@456"] +# ## +# ## input => xyzzy,x=y foo@123_value=42,bar@456_value2=84,something_else=1 +# ## output (structured data only) => [foo@123 value=42][bar@456 value2=84][default@32473 something_else=1 x=y] +# +# ## SD-PARAMs separator between the sdid and tag/field key (default = "_") +# # sdparam_separator = "_" +# +# ## Default sdid used for tags/fields that don't contain a prefix defined in +# ## the explicit sdids setting below If no default is specified, no SD-PARAMs +# ## will be used for unrecognized field. +# # default_sdid = "default@32473" +# +# ## List of explicit prefixes to extract from tag/field keys and use as the +# ## SDID, if they match (see above example for more details): +# # sdids = ["foo@123", "bar@456"] +# +# ## Default severity value. Severity and Facility are used to calculate the +# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field +# ## with key "severity_code" is defined. If unset, 5 (notice) is the default +# # default_severity_code = 5 +# +# ## Default facility value. Facility and Severity are used to calculate the +# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field with +# ## key "facility_code" is defined. If unset, 1 (user-level) is the default +# # default_facility_code = 1 +# +# ## Default APP-NAME value (RFC5424#section-6.2.5) +# ## Used when no metric tag with key "appname" is defined. +# ## If unset, "Telegraf" is the default +# # default_appname = "Telegraf" + + +# # Configuration for Amazon Timestream output. 
+# [[outputs.timestream]] +# ## Amazon Region +# region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order: +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Timestream database where the metrics will be inserted. +# ## The database must exist prior to starting Telegraf. +# database_name = "yourDatabaseNameHere" +# +# ## Specifies if the plugin should describe the Timestream database upon starting +# ## to validate if it has access necessary permissions, connection, etc., as a safety check. +# ## If the describe operation fails, the plugin will not start +# ## and therefore the Telegraf agent will not start. +# describe_database_on_start = false +# +# ## The mapping mode specifies how Telegraf records are represented in Timestream. +# ## Valid values are: single-table, multi-table. +# ## For example, consider the following data in line protocol format: +# ## weather,location=us-midwest,season=summer temperature=82,humidity=71 1465839830100400200 +# ## airquality,location=us-west no2=5,pm25=16 1465839830100400200 +# ## where weather and airquality are the measurement names, location and season are tags, +# ## and temperature, humidity, no2, pm25 are fields. 
+# ## In multi-table mode: +# ## - first line will be ingested to table named weather +# ## - second line will be ingested to table named airquality +# ## - the tags will be represented as dimensions +# ## - first table (weather) will have two records: +# ## one with measurement name equals to temperature, +# ## another with measurement name equals to humidity +# ## - second table (airquality) will have two records: +# ## one with measurement name equals to no2, +# ## another with measurement name equals to pm25 +# ## - the Timestream tables from the example will look like this: +# ## TABLE "weather": +# ## time | location | season | measure_name | measure_value::bigint +# ## 2016-06-13 17:43:50 | us-midwest | summer | temperature | 82 +# ## 2016-06-13 17:43:50 | us-midwest | summer | humidity | 71 +# ## TABLE "airquality": +# ## time | location | measure_name | measure_value::bigint +# ## 2016-06-13 17:43:50 | us-west | no2 | 5 +# ## 2016-06-13 17:43:50 | us-west | pm25 | 16 +# ## In single-table mode: +# ## - the data will be ingested to a single table, which name will be valueOf(single_table_name) +# ## - measurement name will stored in dimension named valueOf(single_table_dimension_name_for_telegraf_measurement_name) +# ## - location and season will be represented as dimensions +# ## - temperature, humidity, no2, pm25 will be represented as measurement name +# ## - the Timestream table from the example will look like this: +# ## Assuming: +# ## - single_table_name = "my_readings" +# ## - single_table_dimension_name_for_telegraf_measurement_name = "namespace" +# ## TABLE "my_readings": +# ## time | location | season | namespace | measure_name | measure_value::bigint +# ## 2016-06-13 17:43:50 | us-midwest | summer | weather | temperature | 82 +# ## 2016-06-13 17:43:50 | us-midwest | summer | weather | humidity | 71 +# ## 2016-06-13 17:43:50 | us-west | NULL | airquality | no2 | 5 +# ## 2016-06-13 17:43:50 | us-west | NULL | airquality | pm25 | 16 +# ## In most 
cases, using multi-table mapping mode is recommended.
+# ## However, you can consider using single-table in situations when you have thousands of measurement names.
+# mapping_mode = "multi-table"
+#
+# ## Only valid and required for mapping_mode = "single-table"
+# ## Specifies the Timestream table where the metrics will be uploaded.
+# # single_table_name = "yourTableNameHere"
+#
+# ## Only valid and required for mapping_mode = "single-table"
+# ## Describes what will be the Timestream dimension name for the Telegraf
+# ## measurement name.
+# # single_table_dimension_name_for_telegraf_measurement_name = "namespace"
+#
+# ## Specifies if the plugin should create the table, if the table does not exist.
+# ## The plugin writes the data without prior checking if the table exists.
+# ## When the table does not exist, the error returned from Timestream will cause
+# ## the plugin to create the table, if this parameter is set to true.
+# create_table_if_not_exists = true
+#
+# ## Only valid and required if create_table_if_not_exists = true
+# ## Specifies the Timestream table magnetic store retention period in days.
+# ## Check Timestream documentation for more details.
+# create_table_magnetic_store_retention_period_in_days = 365
+#
+# ## Only valid and required if create_table_if_not_exists = true
+# ## Specifies the Timestream table memory store retention period in hours.
+# ## Check Timestream documentation for more details.
+# create_table_memory_store_retention_period_in_hours = 24
+#
+# ## Only valid and optional if create_table_if_not_exists = true
+# ## Specifies the Timestream table tags.
+# ## Check Timestream documentation for more details
+# # create_table_tags = { "foo" = "bar", "environment" = "dev"}
+
+
+# # Write metrics to Warp 10
+# [[outputs.warp10]]
+# # Prefix to add to the measurement.
+# prefix = "telegraf."
+# +# # URL of the Warp 10 server +# warp_url = "http://localhost:8080" +# +# # Write token to access your app on warp 10 +# token = "Token" +# +# # Warp 10 query timeout +# # timeout = "15s" +# +# ## Print Warp 10 error body +# # print_error_body = false +# +# ## Max string error size +# # max_string_error_size = 511 +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Configuration for Wavefront server to send metrics to +# [[outputs.wavefront]] +# ## Url for Wavefront Direct Ingestion or using HTTP with Wavefront Proxy +# ## If using Wavefront Proxy, also specify port. example: http://proxyserver:2878 +# url = "https://metrics.wavefront.com" +# +# ## Authentication Token for Wavefront. Only required if using Direct Ingestion +# #token = "DUMMY_TOKEN" +# +# ## DNS name of the wavefront proxy server. Do not use if url is specified +# #host = "wavefront.example.com" +# +# ## Port that the Wavefront proxy server listens on. Do not use if url is specified +# #port = 2878 +# +# ## prefix for metrics keys +# #prefix = "my.specific.prefix." +# +# ## whether to use "value" for name of simple fields. default is false +# #simple_fields = false +# +# ## character to use between metric and field name. default is . (dot) +# #metric_separator = "." +# +# ## Convert metric name paths to use metricSeparator character +# ## When true will convert all _ (underscore) characters in final metric name. default is true +# #convert_paths = true +# +# ## Use Strict rules to sanitize metric and tag names from invalid characters +# ## When enabled forward slash (/) and comma (,) will be accepted +# #use_strict = false +# +# ## Use Regex to sanitize metric and tag names from invalid characters +# ## Regex is more thorough, but significantly slower. 
default is false +# #use_regex = false +# +# ## point tags to use as the source name for Wavefront (if none found, host will be used) +# #source_override = ["hostname", "address", "agent_host", "node_host"] +# +# ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default is true +# #convert_bool = true +# +# ## Truncate metric tags to a total of 254 characters for the tag name value. Wavefront will reject any +# ## data point exceeding this limit if not truncated. Defaults to 'false' to provide backwards compatibility. +# #truncate_tags = false +# +# ## Flush the internal buffers after each batch. This effectively bypasses the background sending of metrics +# ## normally done by the Wavefront SDK. This can be used if you are experiencing buffer overruns. The sending +# ## of metrics will block for a longer time, but this will be handled gracefully by the internal buffering in +# ## Telegraf. +# #immediate_flush = true +# +# ## Define a mapping, namespaced by metric prefix, from string values to numeric values +# ## deprecated in 1.9; use the enum processor plugin +# #[[outputs.wavefront.string_to_number.elasticsearch]] +# # green = 1.0 +# # yellow = 0.5 +# # red = 0.0 + + +# # Generic WebSocket output writer. +# [[outputs.websocket]] +# ## URL is the address to send metrics to. Make sure ws or wss scheme is used. +# url = "ws://127.0.0.1:8080/telegraf" +# +# ## Timeouts (make sure read_timeout is larger than server ping interval or set to zero). +# # connect_timeout = "30s" +# # write_timeout = "30s" +# # read_timeout = "30s" +# +# ## Optionally turn on using text data frames (binary by default). +# # use_text_frames = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Data format to output. 
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# # data_format = "influx"
+#
+# ## Additional HTTP Upgrade headers
+# # [outputs.websocket.headers]
+# # Authorization = "Bearer "
+
+
+# # Send aggregated metrics to Yandex.Cloud Monitoring
+# [[outputs.yandex_cloud_monitoring]]
+# ## Timeout for HTTP writes.
+# # timeout = "20s"
+#
+# ## Yandex.Cloud monitoring API endpoint. Normally should not be changed
+# # endpoint_url = "https://monitoring.api.cloud.yandex.net/monitoring/v2/data/write"
+#
+# ## All user metrics should be sent with "custom" service specified. Normally should not be changed
+# # service = "custom"
+
+
+###############################################################################
+# PROCESSOR PLUGINS #
+###############################################################################
+
+
+# # Attach AWS EC2 metadata to metrics
+# [[processors.aws_ec2]]
+# ## Instance identity document tags to attach to metrics.
+# ## For more information see:
+# ## https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html
+# ##
+# ## Available tags:
+# ## * accountId
+# ## * architecture
+# ## * availabilityZone
+# ## * billingProducts
+# ## * imageId
+# ## * instanceId
+# ## * instanceType
+# ## * kernelId
+# ## * pendingTime
+# ## * privateIp
+# ## * ramdiskId
+# ## * region
+# ## * version
+# imds_tags = []
+#
+# ## EC2 instance tags retrieved with DescribeTags action.
+# ## In case tag is empty upon retrieval it's omitted when tagging metrics.
+# ## Note that in order for this to work, role attached to EC2 instance or AWS
+# ## credentials available from the environment must have a policy attached, that
+# ## allows ec2:DescribeTags.
+# ##
+# ## For more information see:
+# ## https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTags.html
+# ec2_tags = []
+#
+# ## Timeout for http requests made against the aws ec2 metadata endpoint.
+# timeout = "10s"
+#
+# ## ordered controls whether or not the metrics need to stay in the same order
+# ## this plugin received them in. If false, this plugin will change the order
+# ## with requests hitting cached results moving through immediately and not
+# ## waiting on slower lookups. This may cause issues for you if you are
+# ## depending on the order of metrics staying the same. If so, set this to true.
+# ## Keeping the metrics ordered may be slightly slower.
+# ordered = false
+#
+# ## max_parallel_calls is the maximum number of AWS API calls to be in flight
+# ## at the same time.
+# ## It's probably best to keep this number fairly low.
+# max_parallel_calls = 10
+
+
+# # Clone metrics and apply modifications.
+# [[processors.clone]]
+# ## All modifications on inputs and aggregators can be overridden:
+# # name_override = "new_name"
+# # name_prefix = "new_name_prefix"
+# # name_suffix = "new_name_suffix"
+#
+# ## Tags to be added (all values must be strings)
+# # [processors.clone.tags]
+# # additional_tag = "tag_value"
+
+
+# # Convert values to another metric value type
+# [[processors.converter]]
+# ## Tags to convert
+# ##
+# ## The table key determines the target type, and the array of key-values
+# ## select the keys to convert. The array may contain globs.
+# ## = [...]
+# [processors.converter.tags]
+# measurement = []
+# string = []
+# integer = []
+# unsigned = []
+# boolean = []
+# float = []
+#
+# ## Fields to convert
+# ##
+# ## The table key determines the target type, and the array of key-values
+# ## select the keys to convert. The array may contain globs.
+# ## = [...]
+# [processors.converter.fields] +# measurement = [] +# tag = [] +# string = [] +# integer = [] +# unsigned = [] +# boolean = [] +# float = [] + + +# # Dates measurements, tags, and fields that pass through this filter. +# [[processors.date]] +# ## New tag to create +# tag_key = "month" +# +# ## New field to create (cannot set both field_key and tag_key) +# # field_key = "month" +# +# ## Date format string, must be a representation of the Go "reference time" +# ## which is "Mon Jan 2 15:04:05 -0700 MST 2006". +# date_format = "Jan" +# +# ## If destination is a field, date format can also be one of +# ## "unix", "unix_ms", "unix_us", or "unix_ns", which will insert an integer field. +# # date_format = "unix" +# +# ## Offset duration added to the date string when writing the new tag. +# # date_offset = "0s" +# +# ## Timezone to use when creating the tag or field using a reference time +# ## string. This can be set to one of "UTC", "Local", or to a location name +# ## in the IANA Time Zone database. +# ## example: timezone = "America/Los_Angeles" +# # timezone = "UTC" + + +# # Filter metrics with repeating field values +# [[processors.dedup]] +# ## Maximum time to suppress output +# dedup_interval = "600s" + + +# # Defaults sets default value(s) for specified fields that are not set on incoming metrics. +# [[processors.defaults]] +# ## Ensures a set of fields always exists on your metric(s) with their +# ## respective default value. +# ## For any given field pair (key = default), if it's not set, a field +# ## is set on the metric with the specified default. +# ## +# ## A field is considered not set if it is nil on the incoming metric; +# ## or it is not nil but its value is an empty string or is a string +# ## of one or more spaces. +# ## = +# # [processors.defaults.fields] +# # field_1 = "bar" +# # time_idle = 0 +# # is_error = true + + +# # Map enum values according to given table. 
+# [[processors.enum]]
+# [[processors.enum.mapping]]
+# ## Name of the field to map. Globs accepted.
+# field = "status"
+#
+# ## Name of the tag to map. Globs accepted.
+# # tag = "status"
+#
+# ## Destination tag or field to be used for the mapped value. By default the
+# ## source tag or field is used, overwriting the original value.
+# dest = "status_code"
+#
+# ## Default value to be used for all values not contained in the mapping
+# ## table. When unset, the unmodified value for the field will be used if no
+# ## match is found.
+# # default = 0
+#
+# ## Table of mappings
+# [processors.enum.mapping.value_mappings]
+# green = 1
+# amber = 2
+# red = 3
+
+
+# # Run executable as long-running processor plugin
+# [[processors.execd]]
+# ## Program to run as daemon
+# ## eg: command = ["/path/to/your_program", "arg1", "arg2"]
+# command = ["cat"]
+#
+# ## Delay before the process is restarted after an unexpected termination
+# restart_delay = "10s"
+
+
+# # Performs file path manipulations on tags and fields
+# [[processors.filepath]]
+# ## Treat the tag value as a path and convert it to its last element, storing the result in a new tag
+# # [[processors.filepath.basename]]
+# # tag = "path"
+# # dest = "basepath"
+#
+# ## Treat the field value as a path and keep all but the last element of path, typically the path's directory
+# # [[processors.filepath.dirname]]
+# # field = "path"
+#
+# ## Treat the tag value as a path, converting it to its last element without its suffix
+# # [[processors.filepath.stem]]
+# # tag = "path"
+#
+# ## Treat the tag value as a path, converting it to the shortest path name equivalent
+# ## to path by purely lexical processing
+# # [[processors.filepath.clean]]
+# # tag = "path"
+#
+# ## Treat the tag value as a path, converting it to a relative path that is lexically
+# ## equivalent to the source path when joined to 'base_path'
+# # [[processors.filepath.rel]]
+# # tag = "path"
+# # base_path = "/var/log"
+#
+# ## Treat the
tag value as a path, replacing each separator character in path with a '/' character. Has only +# ## effect on Windows +# # [[processors.filepath.toslash]] +# # tag = "path" + + +# # Add a tag of the network interface name looked up over SNMP by interface number +# [[processors.ifname]] +# ## Name of tag holding the interface number +# # tag = "ifIndex" +# +# ## Name of output tag where service name will be added +# # dest = "ifName" +# +# ## Name of tag of the SNMP agent to request the interface name from +# # agent = "agent" +# +# ## Timeout for each request. +# # timeout = "5s" +# +# ## SNMP version; can be 1, 2, or 3. +# # version = 2 +# +# ## SNMP community string. +# # community = "public" +# +# ## Number of retries to attempt. +# # retries = 3 +# +# ## The GETBULK max-repetitions parameter. +# # max_repetitions = 10 +# +# ## SNMPv3 authentication and encryption options. +# ## +# ## Security Name. +# # sec_name = "myuser" +# ## Authentication protocol; one of "MD5", "SHA", or "". +# # auth_protocol = "MD5" +# ## Authentication password. +# # auth_password = "pass" +# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". +# # sec_level = "authNoPriv" +# ## Context Name. +# # context_name = "" +# ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "". +# # priv_protocol = "" +# ## Privacy password used for encrypted messages. +# # priv_password = "" +# +# ## max_parallel_lookups is the maximum number of SNMP requests to +# ## make at the same time. +# # max_parallel_lookups = 100 +# +# ## ordered controls whether or not the metrics need to stay in the +# ## same order this plugin received them in. If false, this plugin +# ## may change the order when data is cached. If you need metrics to +# ## stay in order set this to true. keeping the metrics ordered may +# ## be slightly slower +# # ordered = false +# +# ## cache_ttl is the amount of time interface names are cached for a +# ## given agent. 
After this period elapses if names are needed they +# ## will be retrieved again. +# # cache_ttl = "8h" + + +# # Apply metric modifications using override semantics. +# [[processors.override]] +# ## All modifications on inputs and aggregators can be overridden: +# # name_override = "new_name" +# # name_prefix = "new_name_prefix" +# # name_suffix = "new_name_suffix" +# +# ## Tags to be added (all values must be strings) +# # [processors.override.tags] +# # additional_tag = "tag_value" + + +# # Parse a value in a specified field/tag(s) and add the result in a new metric +# [[processors.parser]] +# ## The name of the fields whose value will be parsed. +# parse_fields = [] +# +# ## If true, incoming metrics are not emitted. +# drop_original = false +# +# ## If set to override, emitted metrics will be merged by overriding the +# ## original metric using the newly parsed metrics. +# merge = "override" +# +# ## The dataformat to be read from files +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Rotate a single valued metric into a multi field metric +# [[processors.pivot]] +# ## Tag to use for naming the new field. +# tag_key = "name" +# ## Field to use as the value of the new field. 
+# value_key = "value" + + +# # Given a tag/field of a TCP or UDP port number, add a tag/field of the service name looked up in the system services file +# [[processors.port_name]] +# [[processors.port_name]] +# ## Name of tag holding the port number +# # tag = "port" +# ## Or name of the field holding the port number +# # field = "port" +# +# ## Name of output tag or field (depending on the source) where service name will be added +# # dest = "service" +# +# ## Default tcp or udp +# # default_protocol = "tcp" +# +# ## Tag containing the protocol (tcp or udp, case-insensitive) +# # protocol_tag = "proto" +# +# ## Field containing the protocol (tcp or udp, case-insensitive) +# # protocol_field = "proto" + + +# # Print all metrics that pass through this filter. +# [[processors.printer]] + + +# # Transforms tag and field values with regex pattern +# [[processors.regex]] +# ## Tag and field conversions defined in a separate sub-tables +# # [[processors.regex.tags]] +# # ## Tag to change +# # key = "resp_code" +# # ## Regular expression to match on a tag value +# # pattern = "^(\\d)\\d\\d$" +# # ## Matches of the pattern will be replaced with this string. Use ${1} +# # ## notation to use the text of the first submatch. +# # replacement = "${1}xx" +# +# # [[processors.regex.fields]] +# # ## Field to change +# # key = "request" +# # ## All the power of the Go regular expressions available here +# # ## For example, named subgroups +# # pattern = "^/api(?P/[\\w/]+)\\S*" +# # replacement = "${method}" +# # ## If result_key is present, a new field will be created +# # ## instead of changing existing field +# # result_key = "method" +# +# ## Multiple conversions may be applied for one field sequentially +# ## Let's extract one more value +# # [[processors.regex.fields]] +# # key = "request" +# # pattern = ".*category=(\\w+).*" +# # replacement = "${1}" +# # result_key = "search_category" + + +# # Rename measurements, tags, and fields that pass through this filter. 
+# [[processors.rename]]
+
+
+# # ReverseDNS does a reverse lookup on IP addresses to retrieve the DNS name
+# [[processors.reverse_dns]]
+# ## For optimal performance, you may want to limit which metrics are passed to this
+# ## processor. eg:
+# ## namepass = ["my_metric_*"]
+#
+# ## cache_ttl is how long the dns entries should stay cached for.
+# ## generally longer is better, but if you expect a large number of diverse lookups
+# ## you'll want to consider memory use.
+# cache_ttl = "24h"
+#
+# ## lookup_timeout is how long you should wait for a single dns request to respond.
+# ## this is also the maximum acceptable latency for a metric travelling through
+# ## the reverse_dns processor. After lookup_timeout is exceeded, a metric will
+# ## be passed on unaltered.
+# ## multiple simultaneous resolution requests for the same IP will only make a
+# ## single rDNS request, and they will all wait for the answer for this long.
+# lookup_timeout = "3s"
+#
+# ## max_parallel_lookups is the maximum number of dns requests to be in flight
+# ## at the same time. Requests hitting cached values do not count against this
+# ## total, and neither do multiple requests for the same IP.
+# ## It's probably best to keep this number fairly low.
+# max_parallel_lookups = 10
+#
+# ## ordered controls whether or not the metrics need to stay in the same order
+# ## this plugin received them in. If false, this plugin will change the order
+# ## with requests hitting cached results moving through immediately and not
+# ## waiting on slower lookups. This may cause issues for you if you are
+# ## depending on the order of metrics staying the same. If so, set this to true.
+# ## keeping the metrics ordered may be slightly slower.
+# ordered = false +# +# [[processors.reverse_dns.lookup]] +# ## get the ip from the field "source_ip", and put the result in the field "source_name" +# field = "source_ip" +# dest = "source_name" +# +# [[processors.reverse_dns.lookup]] +# ## get the ip from the tag "destination_ip", and put the result in the tag +# ## "destination_name". +# tag = "destination_ip" +# dest = "destination_name" +# +# ## If you would prefer destination_name to be a field instead, you can use a +# ## processors.converter after this one, specifying the order attribute. + + +# # Add the S2 Cell ID as a tag based on latitude and longitude fields +# [[processors.s2geo]] +# ## The name of the lat and lon fields containing WGS-84 latitude and +# ## longitude in decimal degrees. +# # lat_field = "lat" +# # lon_field = "lon" +# +# ## New tag to create +# # tag_key = "s2_cell_id" +# +# ## Cell level (see https://s2geometry.io/resources/s2cell_statistics.html) +# # cell_level = 9 + + +# # Process metrics using a Starlark script +# [[processors.starlark]] +# ## The Starlark source can be set as a string in this configuration file, or +# ## by referencing a file containing the script. Only one source or script +# ## should be set at once. +# ## +# ## Source of the Starlark script. +# source = ''' +# def apply(metric): +# return metric +# ''' +# +# ## File containing a Starlark script. +# # script = "/usr/local/bin/myscript.star" +# +# ## The constants of the Starlark script. 
+# # [processors.starlark.constants] +# # max_size = 10 +# # threshold = 0.75 +# # default_name = "Julia" +# # debug_mode = true + + +# # Perform string processing on tags, fields, and measurements +# [[processors.strings]] +# ## Convert a tag value to uppercase +# # [[processors.strings.uppercase]] +# # tag = "method" +# +# ## Convert a field value to lowercase and store in a new field +# # [[processors.strings.lowercase]] +# # field = "uri_stem" +# # dest = "uri_stem_normalised" +# +# ## Convert a field value to titlecase +# # [[processors.strings.titlecase]] +# # field = "status" +# +# ## Trim leading and trailing whitespace using the default cutset +# # [[processors.strings.trim]] +# # field = "message" +# +# ## Trim leading characters in cutset +# # [[processors.strings.trim_left]] +# # field = "message" +# # cutset = "\t" +# +# ## Trim trailing characters in cutset +# # [[processors.strings.trim_right]] +# # field = "message" +# # cutset = "\r\n" +# +# ## Trim the given prefix from the field +# # [[processors.strings.trim_prefix]] +# # field = "my_value" +# # prefix = "my_" +# +# ## Trim the given suffix from the field +# # [[processors.strings.trim_suffix]] +# # field = "read_count" +# # suffix = "_count" +# +# ## Replace all non-overlapping instances of old with new +# # [[processors.strings.replace]] +# # measurement = "*" +# # old = ":" +# # new = "_" +# +# ## Trims strings based on width +# # [[processors.strings.left]] +# # field = "message" +# # width = 10 +# +# ## Decode a base64 encoded utf-8 string +# # [[processors.strings.base64decode]] +# # field = "message" +# +# ## Sanitize a string to ensure it is a valid utf-8 string +# ## Each run of invalid UTF-8 byte sequences is replaced by the replacement string, which may be empty +# # [[processors.strings.valid_utf8]] +# # field = "message" +# # replacement = "" + + +# # Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit. 
+# [[processors.tag_limit]]
+# ## Maximum number of tags to preserve
+# limit = 10
+#
+# ## List of tags to preferentially preserve
+# keep = ["foo", "bar", "baz"]
+
+
+# # Uses a Go template to create a new tag
+# [[processors.template]]
+# ## Tag to set with the output of the template.
+# tag = "topic"
+#
+# ## Go template used to create the tag value. In order to ease TOML
+# ## escaping requirements, you may wish to use single quotes around the
+# ## template string.
+# template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}'
+
+
+# # Keep only the metrics with the top k aggregated field values over a period of time.
+# [[processors.topk]]
+# ## How many seconds between aggregations
+# # period = 10
+#
+# ## How many top metrics to return
+# # k = 10
+#
+# ## Over which tags should the aggregation be done. Globs can be specified, in
+# ## which case any tag matching the glob will be aggregated over. If set to an
+# ## empty list, no aggregation over tags is done
+# # group_by = ['*']
+#
+# ## Over which fields the top k are calculated
+# # fields = ["value"]
+#
+# ## What aggregation to use. Options: sum, mean, min, max
+# # aggregation = "mean"
+#
+# ## Instead of the top k largest metrics, return the bottom k lowest metrics
+# # bottomk = false
+#
+# ## The plugin assigns each metric a GroupBy tag generated from its name and
+# ## tags. If this setting is different than "" the plugin will add a
+# ## tag (whose name will be the value of this setting) to each metric with
+# ## the value of the calculated GroupBy tag. Useful for debugging
+# # add_groupby_tag = ""
+#
+# ## These settings provide a way to know the position of each metric in
+# ## the top k. The 'add_rank_fields' setting allows to specify for which
+# ## fields the position is required. If the list is non empty, then a field
+# ## will be added to each and every metric for each string present in this
+# ## setting. This field will contain the ranking of the group that
+# ## the metric belonged to when aggregated over that field.
+# ## The name of the field will be set to the name of the aggregation field, +# ## suffixed with the string '_topk_rank' +# # add_rank_fields = [] +# +# ## These settings provide a way to know what values the plugin is generating +# ## when aggregating metrics. The 'add_aggregate_field' setting allows to +# ## specify for which fields the final aggregation value is required. If the +# ## list is non empty, then a field will be added to each every metric for +# ## each field present in this setting. This field will contain +# ## the computed aggregation for the group that the metric belonged to when +# ## aggregated over that field. +# ## The name of the field will be set to the name of the aggregation field, +# ## suffixed with the string '_topk_aggregate' +# # add_aggregate_fields = [] + + +# # Rotate multi field metric into several single field metrics +# [[processors.unpivot]] +# ## Tag to use for the name. +# tag_key = "name" +# ## Field to use for the name of the value. +# value_key = "value" + + +############################################################################### +# AGGREGATOR PLUGINS # +############################################################################### + + +# # Keep the aggregate basicstats of each metric passing through. +# [[aggregators.basicstats]] +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# +# ## Configures which basic stats to push as fields +# # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"] + + +# # Calculates a derivative for every field. +# [[aggregators.derivative]] +# ## The period in which to flush the aggregator. +# period = "30s" +# ## +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. 
+# drop_original = false +# ## +# ## This aggregator will estimate a derivative for each field, which is +# ## contained in both the first and last metric of the aggregation interval. +# ## Without further configuration the derivative will be calculated with +# ## respect to the time difference between these two measurements in seconds. +# ## The formula applied is for every field: +# ## +# ## value_last - value_first +# ## derivative = -------------------------- +# ## time_difference_in_seconds +# ## +# ## The resulting derivative will be named *fieldname_rate*. The suffix +# ## "_rate" can be configured by the *suffix* parameter. When using a +# ## derivation variable you can include its name for more clarity. +# # suffix = "_rate" +# ## +# ## As an abstraction the derivative can be calculated not only by the time +# ## difference but by the difference of a field, which is contained in the +# ## measurement. This field is assumed to be monotonously increasing. This +# ## feature is used by specifying a *variable*. +# ## Make sure the specified variable is not filtered and exists in the metrics +# ## passed to this aggregator! +# # variable = "" +# ## +# ## When using a field as the derivation parameter the name of that field will +# ## be used for the resulting derivative, e.g. *fieldname_by_parameter*. +# ## +# ## Note, that the calculation is based on the actual timestamp of the +# ## measurements. When there is only one measurement during that period, the +# ## measurement will be rolled over to the next period. The maximum number of +# ## such roll-overs can be configured with a default of 10. +# # max_roll_over = 10 +# ## + + +# # Report the final metric of a series +# [[aggregators.final]] +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. 
+# drop_original = false +# +# ## The time that a series is not updated until considering it final. +# series_timeout = "5m" + + +# # Create aggregate histograms. +# [[aggregators.histogram]] +# ## The period in which to flush the aggregator. +# period = "30s" +# +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# +# ## If true, the histogram will be reset on flush instead +# ## of accumulating the results. +# reset = false +# +# ## Whether bucket values should be accumulated. If set to false, "gt" tag will be added. +# ## Defaults to true. +# cumulative = true +# +# ## Example config that aggregates all fields of the metric. +# # [[aggregators.histogram.config]] +# # ## Right borders of buckets (with +Inf implicitly added). +# # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0] +# # ## The name of metric. +# # measurement_name = "cpu" +# +# ## Example config that aggregates only specific fields of the metric. +# # [[aggregators.histogram.config]] +# # ## Right borders of buckets (with +Inf implicitly added). +# # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0] +# # ## The name of metric. +# # measurement_name = "diskio" +# # ## The concrete fields of metric +# # fields = ["io_time", "read_time", "write_time"] + + +# # Merge metrics into multifield metrics by series key +# [[aggregators.merge]] +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = true + + +# # Keep the aggregate min/max of each metric passing through. +# [[aggregators.minmax]] +# ## General Aggregator Arguments: +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. 
+# drop_original = false + + +# # Keep the aggregate quantiles of each metric passing through. +# [[aggregators.quantile]] +# ## General Aggregator Arguments: +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# +# ## Quantiles to output in the range [0,1] +# # quantiles = [0.25, 0.5, 0.75] +# +# ## Type of aggregation algorithm +# ## Supported are: +# ## "t-digest" -- approximation using centroids, can cope with large number of samples +# ## "exact R7" -- exact computation also used by Excel or NumPy (Hyndman & Fan 1996 R7) +# ## "exact R8" -- exact computation (Hyndman & Fan 1996 R8) +# ## NOTE: Do not use "exact" algorithms with large number of samples +# ## to not impair performance or memory consumption! +# # algorithm = "t-digest" +# +# ## Compression for approximation (t-digest). The value needs to be +# ## greater or equal to 1.0. Smaller values will result in more +# ## performance but less accuracy. +# # compression = 100.0 + + +# # Count the occurrence of values in fields. +# [[aggregators.valuecounter]] +# ## General Aggregator Arguments: +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. 
+# drop_original = false +# ## The fields for which the values will be counted +# fields = [] + + +############################################################################### +# INPUT PLUGINS # +############################################################################### + + +# Read metrics about cpu usage +[[inputs.cpu]] + ## Whether to report per-cpu stats or not + percpu = true + ## Whether to report total system cpu stats or not + totalcpu = true + ## If true, collect raw CPU time metrics + collect_cpu_time = false + ## If true, compute and report the sum of all non-idle CPU states + report_active = false + + +# Read metrics about disk usage by mount point +[[inputs.disk]] + ## By default stats will be gathered for all mount points. + ## Set mount_points will restrict the stats to only the specified mount points. + # mount_points = ["/"] + + ## Ignore mount points by filesystem type. + ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"] + + +# Read metrics about disk IO by device +[[inputs.diskio]] + ## By default, telegraf will gather stats for all devices including + ## disk partitions. + ## Setting devices will restrict the stats to the specified devices. + # devices = ["sda", "sdb", "vd*"] + ## Uncomment the following line if you need disk serial numbers. + # skip_serial_number = false + # + ## On systems which support it, device metadata can be added in the form of + ## tags. + ## Currently only Linux is supported via udev properties. You can view + ## available properties for a device by running: + ## 'udevadm info -q property -n /dev/sda' + ## Note: Most, but not all, udev properties can be accessed this way. Properties + ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH. + # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] + # + ## Using the same metadata source as device_tags, you can also customize the + ## name of the device via templates. 
+ ## The 'name_templates' parameter is a list of templates to try and apply to + ## the device. The template may contain variables in the form of '$PROPERTY' or + ## '${PROPERTY}'. The first template which does not contain any variables not + ## present for the device is used as the device name tag. + ## The typical use case is for LVM volumes, to get the VG/LV name instead of + ## the near-meaningless DM-0 name. + # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] + + +# Get kernel statistics from /proc/stat +[[inputs.kernel]] + # no configuration + + +# Read metrics about memory usage +[[inputs.mem]] + # no configuration + + +# Get the number of processes and group them by status +[[inputs.processes]] + # no configuration + + +# Read metrics about swap memory usage +[[inputs.swap]] + # no configuration + + +# Read metrics about system load & uptime +[[inputs.system]] + ## Uncomment to remove deprecated metrics. + # fielddrop = ["uptime_format"] + + +# # Gather ActiveMQ metrics +# [[inputs.activemq]] +# ## ActiveMQ WebConsole URL +# url = "http://127.0.0.1:8161" +# +# ## Required ActiveMQ Endpoint +# ## deprecated in 1.11; use the url option +# # server = "127.0.0.1" +# # port = 8161 +# +# ## Credentials for basic HTTP authentication +# # username = "admin" +# # password = "admin" +# +# ## Required ActiveMQ webadmin root path +# # webadmin = "admin" +# +# ## Maximum time to receive response. +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read stats from aerospike server(s) +# [[inputs.aerospike]] +# ## Aerospike servers to connect to (with port) +# ## This plugin will query all namespaces the aerospike +# ## server has configured and get stats for them. 
+# servers = ["localhost:3000"] +# +# # username = "telegraf" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # enable_tls = false +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## If false, skip chain & host verification +# # insecure_skip_verify = true +# +# # Feature Options +# # Add namespace variable to limit the namespaces executed on +# # Leave blank to do all +# # disable_query_namespaces = true # default false +# # namespaces = ["namespace1", "namespace2"] +# +# # Enable set level telemetry +# # query_sets = true # default: false +# # Add namespace set combinations to limit sets executed on +# # Leave blank to do all sets +# # sets = ["namespace1/set1", "namespace1/set2", "namespace3"] +# +# # Histograms +# # enable_ttl_histogram = true # default: false +# # enable_object_size_linear_histogram = true # default: false +# +# # by default, aerospike produces a 100 bucket histogram +# # this is not great for most graphing tools, this will allow +# # the ability to squash this to a smaller number of buckets +# # To have a balanced histogram, the number of buckets chosen +# # should divide evenly into 100. +# # num_histogram_buckets = 100 # default: 10 + + +# # Query statistics from AMD Graphics cards using rocm-smi binary +# [[inputs.amd_rocm_smi]] +# ## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath +# # bin_path = "/opt/rocm/bin/rocm-smi" +# +# ## Optional: timeout for GPU polling +# # timeout = "5s" + + +# # Read Apache status information (mod_status) +# [[inputs.apache]] +# ## An array of URLs to gather from, must be directed at the machine +# ## readable version of the mod_status page including the auto query string. +# ## Default is "http://localhost/server-status?auto". +# urls = ["http://localhost/server-status?auto"] +# +# ## Credentials for basic HTTP authentication. 
+# # username = "myuser" +# # password = "mypassword" +# +# ## Maximum time to receive response. +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Monitor APC UPSes connected to apcupsd +# [[inputs.apcupsd]] +# # A list of running apcupsd server to connect to. +# # If not provided will default to tcp://127.0.0.1:3551 +# servers = ["tcp://127.0.0.1:3551"] +# +# ## Timeout for dialing server. +# timeout = "5s" + + +# # Gather metrics from Apache Aurora schedulers +# [[inputs.aurora]] +# ## Schedulers are the base addresses of your Aurora Schedulers +# schedulers = ["http://127.0.0.1:8081"] +# +# ## Set of role types to collect metrics from. +# ## +# ## The scheduler roles are checked each interval by contacting the +# ## scheduler nodes; zookeeper is not contacted. +# # roles = ["leader", "follower"] +# +# ## Timeout is the max time for total network operations. +# # timeout = "5s" +# +# ## Username and password are sent using HTTP Basic Auth. 
+# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Gather Azure Storage Queue metrics +# [[inputs.azure_storage_queue]] +# ## Required Azure Storage Account name +# account_name = "mystorageaccount" +# +# ## Required Azure Storage Account access key +# account_key = "storageaccountaccesskey" +# +# ## Set to false to disable peeking age of oldest message (executes faster) +# # peek_oldest_message_age = true + + +# # Collects Beanstalkd server and tubes stats +# [[inputs.beanstalkd]] +# ## Server to collect data from +# server = "localhost:11300" +# +# ## List of tubes to gather stats about. +# ## If no tubes specified then data gathered for each tube on server reported by list-tubes command +# tubes = ["notifications"] + + +# # Read metrics exposed by Beat +# [[inputs.beat]] +# ## An URL from which to read Beat-formatted JSON +# ## Default is "http://127.0.0.1:5066". +# url = "http://127.0.0.1:5066" +# +# ## Enable collection of the listed stats +# ## An empty list means collect all. Available options are currently +# ## "beat", "libbeat", "system" and "filebeat". 
+# # include = ["beat", "libbeat", "filebeat"] +# +# ## HTTP method +# # method = "GET" +# +# ## Optional HTTP headers +# # headers = {"X-Special-Header" = "Special-Value"} +# +# ## Override HTTP "Host" header +# # host_header = "logstash.example.com" +# +# ## Timeout for HTTP requests +# # timeout = "5s" +# +# ## Optional HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read BIND nameserver XML statistics +# [[inputs.bind]] +# ## An array of BIND XML statistics URI to gather stats. +# ## Default is "http://localhost:8053/xml/v3". +# # urls = ["http://localhost:8053/xml/v3"] +# # gather_memory_contexts = false +# # gather_views = false +# +# ## Timeout for http requests made by bind nameserver +# # timeout = "4s" + + +# # Collect bond interface status, slaves statuses and failures count +# [[inputs.bond]] +# ## Sets 'proc' directory path +# ## If not specified, then default is /proc +# # host_proc = "/proc" +# +# ## By default, telegraf gather stats for all bond interfaces +# ## Setting interfaces will restrict the stats to the specified +# ## bond interfaces. +# # bond_interfaces = ["bond0"] + + +# # Collect Kafka topics and consumers status from Burrow HTTP API. +# [[inputs.burrow]] +# ## Burrow API endpoints in format "schema://host:port". +# ## Default is "http://localhost:8000". +# servers = ["http://localhost:8000"] +# +# ## Override Burrow API prefix. +# ## Useful when Burrow is behind reverse-proxy. +# # api_prefix = "/v3/kafka" +# +# ## Maximum time to receive response. +# # response_timeout = "5s" +# +# ## Limit per-server concurrent connections. +# ## Useful in case of large number of topics or consumer groups. 
+# # concurrent_connections = 20 +# +# ## Filter clusters, default is no filtering. +# ## Values can be specified as glob patterns. +# # clusters_include = [] +# # clusters_exclude = [] +# +# ## Filter consumer groups, default is no filtering. +# ## Values can be specified as glob patterns. +# # groups_include = [] +# # groups_exclude = [] +# +# ## Filter topics, default is no filtering. +# ## Values can be specified as glob patterns. +# # topics_include = [] +# # topics_exclude = [] +# +# ## Credentials for basic HTTP authentication. +# # username = "" +# # password = "" +# +# ## Optional SSL config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# # insecure_skip_verify = false + + +# # Collects performance metrics from the MON, OSD, MDS and RGW nodes in a Ceph storage cluster. +# [[inputs.ceph]] +# ## This is the recommended interval to poll. Too frequent and you will lose +# ## data points due to timeouts during rebalancing and recovery +# interval = '1m' +# +# ## All configuration values are optional, defaults are shown below +# +# ## location of ceph binary +# ceph_binary = "/usr/bin/ceph" +# +# ## directory in which to look for socket files +# socket_dir = "/var/run/ceph" +# +# ## prefix of MON and OSD socket files, used to determine socket type +# mon_prefix = "ceph-mon" +# osd_prefix = "ceph-osd" +# mds_prefix = "ceph-mds" +# rgw_prefix = "ceph-client" +# +# ## suffix used to identify socket files +# socket_suffix = "asok" +# +# ## Ceph user to authenticate as, ceph will search for the corresponding keyring +# ## e.g. client.admin.keyring in /etc/ceph, or the explicit path defined in the +# ## client section of ceph.conf for example: +# ## +# ## [client.telegraf] +# ## keyring = /etc/ceph/client.telegraf.keyring +# ## +# ## Consult the ceph documentation for more detail on keyring generation. 
+# ceph_user = "client.admin" +# +# ## Ceph configuration to use to locate the cluster +# ceph_config = "/etc/ceph/ceph.conf" +# +# ## Whether to gather statistics via the admin socket +# gather_admin_socket_stats = true +# +# ## Whether to gather statistics via ceph commands, requires ceph_user and ceph_config +# ## to be specified +# gather_cluster_stats = false + + +# # Read specific statistics per cgroup +# [[inputs.cgroup]] +# ## Directories in which to look for files, globs are supported. +# ## Consider restricting paths to the set of cgroups you really +# ## want to monitor if you have a large number of cgroups, to avoid +# ## any cardinality issues. +# # paths = [ +# # "/sys/fs/cgroup/memory", +# # "/sys/fs/cgroup/memory/child1", +# # "/sys/fs/cgroup/memory/child2/*", +# # ] +# ## cgroup stat fields, as file names, globs are supported. +# ## these file names are appended to each path from above. +# # files = ["memory.*usage*", "memory.limit_in_bytes"] + + +# # Get standard chrony metrics, requires chronyc executable. +# [[inputs.chrony]] +# ## If true, chronyc tries to perform a DNS lookup for the time server. 
+# # dns_lookup = false
+
+
+# # Pull Metric Statistics from Amazon CloudWatch
+# [[inputs.cloudwatch]]
+#   ## Amazon Region
+#   region = "us-east-1"
+#
+#   ## Amazon Credentials
+#   ## Credentials are loaded in the following order
+#   ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
+#   ## 2) Assumed credentials via STS if role_arn is specified
+#   ## 3) explicit credentials from 'access_key' and 'secret_key'
+#   ## 4) shared profile from 'profile'
+#   ## 5) environment variables
+#   ## 6) shared credentials file
+#   ## 7) EC2 Instance Profile
+#   # access_key = ""
+#   # secret_key = ""
+#   # token = ""
+#   # role_arn = ""
+#   # web_identity_token_file = ""
+#   # role_session_name = ""
+#   # profile = ""
+#   # shared_credential_file = ""
+#
+#   ## Endpoint to make request against, the correct endpoint is automatically
+#   ## determined and this option should only be set if you wish to override the
+#   ## default.
+#   ##   ex: endpoint_url = "http://localhost:8000"
+#   # endpoint_url = ""
+#
+#   ## Set http_proxy (telegraf uses the system wide proxy settings if it is not set)
+#   # http_proxy_url = "http://localhost:8888"
+#
+#   # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
+#   # metrics are made available to the 1 minute period. Some are collected at
+#   # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
+#   # Note that if a period is configured that is smaller than the minimum for a
+#   # particular metric, that metric will not be returned by the Cloudwatch API
+#   # and will not be collected by Telegraf.
+# # +# ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s) +# period = "5m" +# +# ## Collection Delay (required - must account for metrics availability via CloudWatch API) +# delay = "5m" +# +# ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid +# ## gaps or overlap in pulled data +# interval = "5m" +# +# ## Recommended if "delay" and "period" are both within 3 hours of request time. Invalid values will be ignored. +# ## Recently Active feature will only poll for CloudWatch ListMetrics values that occurred within the last 3 Hours. +# ## If enabled, it will reduce total API usage of the CloudWatch ListMetrics API and require less memory to retain. +# ## Do not enable if "period" or "delay" is longer than 3 hours, as it will not return data more than 3 hours old. +# ## See https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_ListMetrics.html +# #recently_active = "PT3H" +# +# ## Configure the TTL for the internal cache of metrics. +# # cache_ttl = "1h" +# +# ## Metric Statistic Namespaces (required) +# namespaces = ["AWS/ELB"] +# # A single metric statistic namespace that will be appended to namespaces on startup +# # namespace = "AWS/ELB" +# +# ## Maximum requests per second. Note that the global default AWS rate limit is +# ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a +# ## maximum of 50. +# ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html +# # ratelimit = 25 +# +# ## Timeout for http requests made by the cloudwatch client. +# # timeout = "5s" +# +# ## Namespace-wide statistic filters. These allow fewer queries to be made to +# ## cloudwatch. 
+#   # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ]
+#   # statistic_exclude = []
+#
+#   ## Metrics to Pull
+#   ## Defaults to all Metrics in Namespace if nothing is provided
+#   ## Refreshes Namespace available metrics every 1h
+#   #[[inputs.cloudwatch.metrics]]
+#   #  names = ["Latency", "RequestCount"]
+#   #
+#   #  ## Statistic filters for Metric.  These allow for retrieving specific
+#   #  ## statistics for an individual metric.
+#   #  # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ]
+#   #  # statistic_exclude = []
+#   #
+#   #  ## Dimension filters for Metric.  All dimensions defined for the metric names
+#   #  ## must be specified in order to retrieve the metric statistics.
+#   #  ## 'value' has wildcard / 'glob' matching support such as 'p-*'.
+#   #  [[inputs.cloudwatch.metrics.dimensions]]
+#   #    name = "LoadBalancerName"
+#   #    value = "p-example"
+
+
+# # Gather health check statuses from services registered in Consul
+# [[inputs.consul]]
+#   ## Consul server address
+#   # address = "localhost:8500"
+#
+#   ## URI scheme for the Consul server, one of "http", "https"
+#   # scheme = "http"
+#
+#   ## Metric version controls the mapping from Consul metrics into
+#   ## Telegraf metrics.
+#   ##
+#   ##   example: metric_version = 1; deprecated in 1.15
+#   ##            metric_version = 2; recommended version
+#   # metric_version = 1
+#
+#   ## ACL token used in every request
+#   # token = ""
+#
+#   ## HTTP Basic Authentication username and password.
+#   # username = ""
+#   # password = ""
+#
+#   ## Data center to query the health checks from
+#   # datacenter = ""
+#
+#   ## Optional TLS Config
+#   # tls_ca = "/etc/telegraf/ca.pem"
+#   # tls_cert = "/etc/telegraf/cert.pem"
+#   # tls_key = "/etc/telegraf/key.pem"
+#   ## Use TLS but skip chain & host verification
+#   # insecure_skip_verify = true
+#
+#   ## Consul checks' tag splitting
+#   # When tags are formatted like "key:value" with ":" as a delimiter then
+#   # they will be split and reported as proper key:value in Telegraf
+#   # tag_delimiter = ":"
+
+
+# # Read per-node and per-bucket metrics from Couchbase
+# [[inputs.couchbase]]
+#   ## specify servers via a url matching:
+#   ##  [protocol://][:password]@address[:port]
+#   ##  e.g.
+#   ##    http://couchbase-0.example.com/
+#   ##    http://admin:secret@couchbase-0.example.com:8091/
+#   ##
+#   ## If no servers are specified, then localhost is used as the host.
+#   ## If no protocol is specified, HTTP is used.
+#   ## If no port is specified, 8091 is used.
+#   servers = ["http://localhost:8091"]
+#
+#   ## Filter bucket fields to include only here.
+#   # bucket_stats_included = ["quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"]
+#
+#   ## Optional TLS Config
+#   # tls_ca = "/etc/telegraf/ca.pem"
+#   # tls_cert = "/etc/telegraf/cert.pem"
+#   # tls_key = "/etc/telegraf/key.pem"
+#   ## Use TLS but skip chain & host verification (defaults to false)
+#   ## If set to false, tls_cert and tls_key are required
+#   # insecure_skip_verify = false
+
+
+# # Read CouchDB Stats from one or more servers
+# [[inputs.couchdb]]
+#   ## Works with CouchDB stats endpoints out of the box
+#   ## Multiple Hosts from which to read CouchDB stats:
+#   hosts = ["http://localhost:8086/_stats"]
+#
+#   ## Use HTTP Basic Authentication.
+# # basic_username = "telegraf" +# # basic_password = "p@ssw0rd" + + +# # Fetch metrics from a CSGO SRCDS +# [[inputs.csgo]] +# ## Specify servers using the following format: +# ## servers = [ +# ## ["ip1:port1", "rcon_password1"], +# ## ["ip2:port2", "rcon_password2"], +# ## ] +# # +# ## If no servers are specified, no data will be collected +# servers = [] + + +# # Input plugin for DC/OS metrics +# [[inputs.dcos]] +# ## The DC/OS cluster URL. +# cluster_url = "https://dcos-ee-master-1" +# +# ## The ID of the service account. +# service_account_id = "telegraf" +# ## The private key file for the service account. +# service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem" +# +# ## Path containing login token. If set, will read on every gather. +# # token_file = "/home/dcos/.dcos/token" +# +# ## In all filter options if both include and exclude are empty all items +# ## will be collected. Arrays may contain glob patterns. +# ## +# ## Node IDs to collect metrics from. If a node is excluded, no metrics will +# ## be collected for its containers or apps. +# # node_include = [] +# # node_exclude = [] +# ## Container IDs to collect container metrics from. +# # container_include = [] +# # container_exclude = [] +# ## Container IDs to collect app metrics from. +# # app_include = [] +# # app_exclude = [] +# +# ## Maximum concurrent connections to the cluster. +# # max_connections = 10 +# ## Maximum time to receive a response from cluster. +# # response_timeout = "20s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## If false, skip chain & host verification +# # insecure_skip_verify = true +# +# ## Recommended filtering to reduce series cardinality. +# # [inputs.dcos.tagdrop] +# # path = ["/var/lib/mesos/slave/slaves/*"] + + +# # Read metrics from one or many disque servers +# [[inputs.disque]] +# ## An array of URI to gather stats about. 
Specify an ip or hostname +# ## with optional port and password. +# ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc. +# ## If no servers are specified, then localhost is used as the host. +# servers = ["localhost"] + + +# # Provide a native collection for dmsetup based statistics for dm-cache +# [[inputs.dmcache]] +# ## Whether to report per-device stats or not +# per_device = true + + +# # Query given DNS server and gives statistics +# [[inputs.dns_query]] +# ## servers to query +# servers = ["8.8.8.8"] +# +# ## Network is the network protocol name. +# # network = "udp" +# +# ## Domains or subdomains to query. +# # domains = ["."] +# +# ## Query record type. +# ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. +# # record_type = "A" +# +# ## Dns server port. +# # port = 53 +# +# ## Query timeout in seconds. +# # timeout = 2 + + +# # Read metrics about docker containers +# [[inputs.docker]] +# ## Docker Endpoint +# ## To use TCP, set endpoint = "tcp://[ip]:[port]" +# ## To use environment variables (ie, docker-machine), set endpoint = "ENV" +# endpoint = "unix:///var/run/docker.sock" +# +# ## Set to true to collect Swarm metrics(desired_replicas, running_replicas) +# gather_services = false +# +# ## Only collect metrics for these containers, collect all if empty +# container_names = [] +# +# ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars +# source_tag = false +# +# ## Containers to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all containers +# container_name_include = [] +# container_name_exclude = [] +# +# ## Container states to include and exclude. Globs accepted. +# ## When empty only containers in the "running" state will be captured. 
+# ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] +# ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] +# # container_state_include = [] +# # container_state_exclude = [] +# +# ## Timeout for docker list, info, and stats commands +# timeout = "5s" +# +# ## Whether to report for each container per-device blkio (8:0, 8:1...), +# ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not. +# ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'. +# ## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting +# ## is honored. +# perdevice = true +# +# ## Specifies for which classes a per-device metric should be issued +# ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...) +# ## Please note that this setting has no effect if 'perdevice' is set to 'true' +# # perdevice_include = ["cpu"] +# +# ## Whether to report for each container total blkio and network stats or not. +# ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'. +# ## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting +# ## is honored. +# total = false +# +# ## Specifies for which classes a total metric should be issued. Total is an aggregated of the 'perdevice' values. +# ## Possible values are 'cpu', 'blkio' and 'network' +# ## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin. 
+# ## Please note that this setting has no effect if 'total' is set to 'false' +# # total_include = ["cpu", "blkio", "network"] +# +# ## Which environment variables should we use as a tag +# ##tag_env = ["JAVA_HOME", "HEAP_SIZE"] +# +# ## docker labels to include and exclude as tags. Globs accepted. +# ## Note that an empty array for both will include all labels as tags +# docker_label_include = [] +# docker_label_exclude = [] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read statistics from one or many dovecot servers +# [[inputs.dovecot]] +# ## specify dovecot servers via an address:port list +# ## e.g. +# ## localhost:24242 +# ## +# ## If no servers are specified, then localhost is used as the host. +# servers = ["localhost:24242"] +# +# ## Type is one of "user", "domain", "ip", or "global" +# type = "global" +# +# ## Wildcard matches like "*.com". An empty string "" is same as "*" +# ## If type = "ip" filters should be +# filters = [""] + + +# # Read metrics about docker containers from Fargate/ECS v2, v3 meta endpoints. +# [[inputs.ecs]] +# ## ECS metadata url. +# ## Metadata v2 API is used if set explicitly. Otherwise, +# ## v3 metadata endpoint API is used if available. +# # endpoint_url = "" +# +# ## Containers to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all containers +# # container_name_include = [] +# # container_name_exclude = [] +# +# ## Container states to include and exclude. Globs accepted. +# ## When empty only containers in the "RUNNING" state will be captured. +# ## Possible values are "NONE", "PULLED", "CREATED", "RUNNING", +# ## "RESOURCES_PROVISIONED", "STOPPED". +# # container_status_include = [] +# # container_status_exclude = [] +# +# ## ecs labels to include and exclude as tags. Globs accepted. 
+#   ## Note that an empty array for both will include all labels as tags
+#   ecs_label_include = [ "com.amazonaws.ecs.*" ]
+#   ecs_label_exclude = []
+#
+#   ## Timeout for queries.
+#   # timeout = "5s"
+
+
+# # Read stats from one or more Elasticsearch servers or clusters
+# [[inputs.elasticsearch]]
+#   ## specify a list of one or more Elasticsearch servers
+#   # you can add username and password to your url to use basic authentication:
+#   # servers = ["http://user:pass@localhost:9200"]
+#   servers = ["http://localhost:9200"]
+#
+#   ## Timeout for HTTP requests to the elastic search server(s)
+#   http_timeout = "5s"
+#
+#   ## When local is true (the default), the node will read only its own stats.
+#   ## Set local to false when you want to read the node stats from all nodes
+#   ## of the cluster.
+#   local = true
+#
+#   ## Set cluster_health to true when you want to also obtain cluster health stats
+#   cluster_health = false
+#
+#   ## Adjust cluster_health_level when you want to also obtain detailed health stats
+#   ## The options are
+#   ##  - indices (default)
+#   ##  - cluster
+#   # cluster_health_level = "indices"
+#
+#   ## Set cluster_stats to true when you want to also obtain cluster stats.
+#   cluster_stats = false
+#
+#   ## Only gather cluster_stats from the master node. To work this requires local = true
+#   cluster_stats_only_from_master = true
+#
+#   ## Indices to collect; can be one or more indices names or _all
+#   ## Use of wildcards is allowed. Use a wildcard at the end to retrieve index names that end with a changing value, like a date.
+#   indices_include = ["_all"]
+#
+#   ## One of "shards", "cluster", "indices"
+#   indices_level = "shards"
+#
+#   ## node_stats is a list of sub-stats that you want to have gathered. Valid options
+#   ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
+#   ## "breaker". Per default, all stats are gathered.
+#   # node_stats = ["jvm", "http"]
+#
+#   ## HTTP Basic Authentication username and password.
+# # username = "" +# # password = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Sets the number of most recent indices to return for indices that are configured with a date-stamped suffix. +# ## Each 'indices_include' entry ending with a wildcard (*) or glob matching pattern will group together all indices that match it, and sort them +# ## by the date or number after the wildcard. Metrics then are gathered for only the 'num_most_recent_indices' amount of most recent indices. +# # num_most_recent_indices = 0 + + +# # Derive metrics from aggregating Elasticsearch query results +# [[inputs.elasticsearch_query]] +# ## The full HTTP endpoint URL for your Elasticsearch instance +# ## Multiple urls can be specified as part of the same cluster, +# ## this means that only ONE of the urls will be written to each interval. +# urls = [ "http://node1.es.example.com:9200" ] # required. +# +# ## Elasticsearch client timeout, defaults to "5s". +# # timeout = "5s" +# +# ## Set to true to ask Elasticsearch a list of all cluster nodes, +# ## thus it is not necessary to list all nodes in the urls config option +# # enable_sniffer = false +# +# ## Set the interval to check if the Elasticsearch nodes are available +# ## This option is only used if enable_sniffer is also set (0s to disable it) +# # health_check_interval = "10s" +# +# ## HTTP basic authentication details (eg. 
when using x-pack)
+#   # username = "telegraf"
+#   # password = "mypassword"
+#
+#   ## Optional TLS Config
+#   # tls_ca = "/etc/telegraf/ca.pem"
+#   # tls_cert = "/etc/telegraf/cert.pem"
+#   # tls_key = "/etc/telegraf/key.pem"
+#   ## Use TLS but skip chain & host verification
+#   # insecure_skip_verify = false
+#
+#   [[inputs.elasticsearch_query.aggregation]]
+#     ## measurement name for the results of the aggregation query
+#     measurement_name = "measurement"
+#
+#     ## Elasticsearch indexes to query (accept wildcards).
+#     index = "index-*"
+#
+#     ## The date/time field in the Elasticsearch index (mandatory).
+#     date_field = "@timestamp"
+#
+#     ## If the field used for the date/time field in Elasticsearch is also using
+#     ## a custom date/time format it may be required to provide the format to
+#     ## correctly parse the field.
+#     ##
+#     ## If using one of the built in elasticsearch formats this is not required.
+#     # date_field_custom_format = ""
+#
+#     ## Time window to query (eg. "1m" to query documents from last minute).
+#     ## Normally should be set to same as collection interval
+#     query_period = "1m"
+#
+#     ## Lucene query to filter results
+#     # filter_query = "*"
+#
+#     ## Fields to aggregate values (must be numeric fields)
+#     # metric_fields = ["metric"]
+#
+#     ## Aggregation function to use on the metric fields
+#     ## Must be set if 'metric_fields' is set
+#     ## Valid values are: avg, sum, min, max
+#     # metric_function = "avg"
+#
+#     ## Fields to be used as tags
+#     ## Must be text, non-analyzed fields.
Metric aggregations are performed per tag +# # tags = ["field.keyword", "field2.keyword"] +# +# ## Set to true to not ignore documents when the tag(s) above are missing +# # include_missing_tag = false +# +# ## String value of the tag when the tag does not exist +# ## Used when include_missing_tag is true +# # missing_tag_value = "null" + + +# # Returns ethtool statistics for given interfaces +# [[inputs.ethtool]] +# ## List of interfaces to pull metrics for +# # interface_include = ["eth0"] +# +# ## List of interfaces to ignore when pulling metrics. +# # interface_exclude = ["eth1"] + + +# # Read metrics from one or more commands that can output to stdout +# [[inputs.exec]] +# ## Commands array +# commands = [ +# "/tmp/test.sh", +# "/usr/bin/mycollector --foo=bar", +# "/tmp/collect_*.sh" +# ] +# +# ## Timeout for each command to complete. +# timeout = "5s" +# +# ## measurement name suffix (for separating different commands) +# name_suffix = "_mycollector" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read metrics from fail2ban. +# [[inputs.fail2ban]] +# ## Use sudo to run fail2ban-client +# use_sudo = false + + +# # Read devices value(s) from a Fibaro controller +# [[inputs.fibaro]] +# ## Required Fibaro controller address/hostname. +# ## Note: at the time of writing this plugin, Fibaro only implemented http - no https available +# url = "http://:80" +# +# ## Required credentials to access the API (http://) +# username = "" +# password = "" +# +# ## Amount of time allowed to complete the HTTP request +# # timeout = "5s" + + +# # Parse a complete file each interval +# [[inputs.file]] +# ## Files to parse each interval. Accept standard unix glob matching rules, +# ## as well as ** to match recursive files and directories. 
+# files = ["/tmp/metrics.out"]
+#
+# ## Name a tag containing the name of the file the data was parsed from. Leave empty
+# ## to disable.
+# # file_tag = ""
+#
+# ## Character encoding to use when interpreting the file contents. Invalid
+# ## characters are replaced using the unicode replacement character. When set
+# ## to the empty string the data is not decoded to text.
+# ## ex: character_encoding = "utf-8"
+# ## character_encoding = "utf-16le"
+# ## character_encoding = "utf-16be"
+# ## character_encoding = ""
+# # character_encoding = ""
+#
+# ## The dataformat to be read from files
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Count files in a directory
+# [[inputs.filecount]]
+# ## Directory to gather stats about.
+# ## deprecated in 1.9; use the directories option
+# # directory = "/var/cache/apt/archives"
+#
+# ## Directories to gather stats about.
+# ## This accepts standard unix glob matching rules, but with the addition of
+# ## ** as a "super asterisk". ie:
+# ## /var/log/** -> recursively find all directories in /var/log and count files in each directory
+# ## /var/log/*/* -> find all directories with a parent dir in /var/log and count files in each directory
+# ## /var/log -> count all files in /var/log and all of its subdirectories
+# directories = ["/var/cache/apt/archives"]
+#
+# ## Only count files that match the name pattern. Defaults to "*".
+# name = "*.deb"
+#
+# ## Count files in subdirectories. Defaults to true.
+# recursive = false
+#
+# ## Only count regular files. Defaults to true.
+# regular_only = true
+#
+# ## Follow all symlinks while walking the directory tree. Defaults to false.
+# follow_symlinks = false
+#
+# ## Only count files that are at least this size. 
If size is +# ## a negative number, only count files that are smaller than the +# ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ... +# ## Without quotes and units, interpreted as size in bytes. +# size = "0B" +# +# ## Only count files that have not been touched for at least this +# ## duration. If mtime is negative, only count files that have been +# ## touched in this duration. Defaults to "0s". +# mtime = "0s" + + +# # Read stats about given file(s) +# [[inputs.filestat]] +# ## Files to gather stats about. +# ## These accept standard unix glob matching rules, but with the addition of +# ## ** as a "super asterisk". ie: +# ## "/var/log/**.log" -> recursively find all .log files in /var/log +# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log +# ## "/var/log/apache.log" -> just tail the apache log file +# ## +# ## See https://github.com/gobwas/glob for more examples +# ## +# files = ["/var/log/**.log"] +# +# ## If true, read the entire file and calculate an md5 checksum. +# md5 = false + + +# # Read real time temps from fireboard.io servers +# [[inputs.fireboard]] +# ## Specify auth token for your account +# auth_token = "invalidAuthToken" +# ## You can override the fireboard server URL if necessary +# # url = https://fireboard.io/api/v1/devices.json +# ## You can set a different http_timeout if you need to +# ## You should set a string using an number and time indicator +# ## for example "12s" for 12 seconds. +# # http_timeout = "4s" + + +# # Read metrics exposed by fluentd in_monitor plugin +# [[inputs.fluentd]] +# ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint). +# ## +# ## Endpoint: +# ## - only one URI is allowed +# ## - https is not supported +# endpoint = "http://localhost:24220/api/plugins.json" +# +# ## Define which plugins have to be excluded (based on "type" field - e.g. 
monitor_agent) +# exclude = [ +# "monitor_agent", +# "dummy", +# ] + + +# # Gather repository information from GitHub hosted repositories. +# [[inputs.github]] +# ## List of repositories to monitor. +# repositories = [ +# "influxdata/telegraf", +# "influxdata/influxdb" +# ] +# +# ## Github API access token. Unauthenticated requests are limited to 60 per hour. +# # access_token = "" +# +# ## Github API enterprise url. Github Enterprise accounts must specify their base url. +# # enterprise_base_url = "" +# +# ## Timeout for HTTP requests. +# # http_timeout = "5s" +# +# ## List of additional fields to query. +# ## NOTE: Getting those fields might involve issuing additional API-calls, so please +# ## make sure you do not exceed the rate-limit of GitHub. +# ## +# ## Available fields are: +# ## - pull-requests -- number of open and closed pull requests (2 API-calls per repository) +# # additional_fields = [] + + +# # Read flattened metrics from one or more GrayLog HTTP endpoints +# [[inputs.graylog]] +# ## API endpoint, currently supported API: +# ## +# ## - multiple (Ex http://:12900/system/metrics/multiple) +# ## - namespace (Ex http://:12900/system/metrics/namespace/{namespace}) +# ## +# ## For namespace endpoint, the metrics array will be ignored for that call. +# ## Endpoint can contain namespace and multiple type calls. +# ## +# ## Please check http://[graylog-server-ip]:12900/api-browser for full list +# ## of endpoints +# servers = [ +# "http://[graylog-server-ip]:12900/system/metrics/multiple", +# ] +# +# ## Metrics list +# ## List of metrics can be found on Graylog webservice documentation. 
+# ## Or by hitting the web service api at:
+# ## http://[graylog-host]:12900/system/metrics
+# metrics = [
+# "jvm.cl.loaded",
+# "jvm.memory.pools.Metaspace.committed"
+# ]
+#
+# ## Username and password
+# username = ""
+# password = ""
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics of haproxy, via socket or csv stats page
+# [[inputs.haproxy]]
+# ## An array of address to gather stats about. Specify an ip or hostname
+# ## with optional port. ie localhost, 10.10.3.33:1936, etc.
+# ## Make sure you specify the complete path to the stats endpoint
+# ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats
+#
+# ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats
+# servers = ["http://myhaproxy.com:1936/haproxy?stats"]
+#
+# ## Credentials for basic HTTP authentication
+# # username = "admin"
+# # password = "admin"
+#
+# ## You can also use local socket with standard wildcard globbing.
+# ## Server address not starting with 'http' will be treated as a possible
+# ## socket, so both examples below are valid.
+# # servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]
+#
+# ## By default, some of the fields are renamed from what haproxy calls them.
+# ## Setting this option to true results in the plugin keeping the original
+# ## field names.
+# # keep_field_names = false
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Monitor disks' temperatures using hddtemp
+# [[inputs.hddtemp]]
+# ## By default, telegraf gathers temps data from all disks detected by the
+# ## hddtemp.
+# ##
+# ## Only collect temps from the selected disks. 
+# ## +# ## A * as the device name will return the temperature values of all disks. +# ## +# # address = "127.0.0.1:7634" +# # devices = ["sda", "*"] + + +# # Read formatted metrics from one or more HTTP endpoints +# [[inputs.http]] +# ## One or more URLs from which to read formatted metrics +# urls = [ +# "http://localhost/metrics" +# ] +# +# ## HTTP method +# # method = "GET" +# +# ## Optional HTTP headers +# # headers = {"X-Special-Header" = "Special-Value"} +# +# ## Optional file with Bearer token +# ## file content is added as an Authorization header +# # bearer_token = "/path/to/file" +# +# ## Optional HTTP Basic Auth Credentials +# # username = "username" +# # password = "pa$$word" +# +# ## HTTP entity-body to send with POST/PUT requests. +# # body = "" +# +# ## HTTP Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "identity" +# +# ## HTTP Proxy support +# # http_proxy_url = "" +# +# ## OAuth2 Client Credentials Grant +# # client_id = "clientid" +# # client_secret = "secret" +# # token_url = "https://indentityprovider/oauth2/v1/token" +# # scopes = ["urn:opc:idm:__myscopes__"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional Cookie authentication +# # cookie_auth_url = "https://localhost/authMe" +# # cookie_auth_method = "POST" +# # cookie_auth_username = "username" +# # cookie_auth_password = "pa$$word" +# # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' +# ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie +# # cookie_auth_renewal = "5m" +# +# ## Amount of time allowed to complete the HTTP request +# # timeout = "5s" +# +# ## List of success status codes +# # success_status_codes = [200] +# 
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# # data_format = "influx"
+
+
+# # HTTP/HTTPS request given an address, a method and a timeout
+# [[inputs.http_response]]
+# ## Deprecated in 1.12, use 'urls'
+# ## Server address (default http://localhost)
+# # address = "http://localhost"
+#
+# ## List of urls to query.
+# # urls = ["http://localhost"]
+#
+# ## Set http_proxy (telegraf uses the system wide proxy settings if it is not set)
+# # http_proxy = "http://localhost:8888"
+#
+# ## Set response_timeout (default 5 seconds)
+# # response_timeout = "5s"
+#
+# ## HTTP Request Method
+# # method = "GET"
+#
+# ## Whether to follow redirects from the server (defaults to false)
+# # follow_redirects = false
+#
+# ## Optional file with Bearer token
+# ## file content is added as an Authorization header
+# # bearer_token = "/path/to/file"
+#
+# ## Optional HTTP Basic Auth Credentials
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## Optional HTTP Request Body
+# # body = '''
+# # {'fake':'data'}
+# # '''
+#
+# ## Optional name of the field that will contain the body of the response.
+# ## By default it is set to an empty String indicating that the body's content won't be added
+# # response_body_field = ''
+#
+# ## Maximum allowed HTTP response body size in bytes.
+# ## 0 means to use the default of 32MiB.
+# ## If the response body size exceeds this limit a "body_read_error" will be raised
+# # response_body_max_size = "32MiB"
+#
+# ## Optional substring or regex match in body of the response (case sensitive)
+# # response_string_match = "\"service_status\": \"up\""
+# # response_string_match = "ok"
+# # response_string_match = "\".*_status\".?:.?\"up\""
+#
+# ## Expected response status code.
+# ## The status code of the response is compared to this value. 
If they match, the field +# ## "response_status_code_match" will be 1, otherwise it will be 0. If the +# ## expected status code is 0, the check is disabled and the field won't be added. +# # response_status_code = 0 +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## HTTP Request Headers (all values must be strings) +# # [inputs.http_response.headers] +# # Host = "github.com" +# +# ## Optional setting to map response http headers into tags +# ## If the http header is not present on the request, no corresponding tag will be added +# ## If multiple instances of the http header are present, only the first value will be used +# # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} +# +# ## Interface to use when dialing an address +# # interface = "eth0" + + +# # Read flattened metrics from one or more JSON HTTP endpoints +# [[inputs.httpjson]] +# ## NOTE This plugin only reads numerical measurements, strings and booleans +# ## will be ignored. +# +# ## Name for the service being polled. Will be appended to the name of the +# ## measurement e.g. httpjson_webserver_stats +# ## +# ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead. 
+# name = "webserver_stats" +# +# ## URL of each server in the service's cluster +# servers = [ +# "http://localhost:9999/stats/", +# "http://localhost:9998/stats/", +# ] +# ## Set response_timeout (default 5 seconds) +# response_timeout = "5s" +# +# ## HTTP method to use: GET or POST (case-sensitive) +# method = "GET" +# +# ## List of tag names to extract from top-level of JSON server response +# # tag_keys = [ +# # "my_tag_1", +# # "my_tag_2" +# # ] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## HTTP parameters (all values must be strings). For "GET" requests, data +# ## will be included in the query. For "POST" requests, data will be included +# ## in the request body as "x-www-form-urlencoded". +# # [inputs.httpjson.parameters] +# # event_type = "cpu_spike" +# # threshold = "0.75" +# +# ## HTTP Headers (all values must be strings) +# # [inputs.httpjson.headers] +# # X-Auth-Token = "my-xauth-token" +# # apiVersion = "v1" + + +# # Gather Icinga2 status +# [[inputs.icinga2]] +# ## Required Icinga2 server address +# # server = "https://localhost:5665" +# +# ## Required Icinga2 object type ("services" or "hosts") +# # object_type = "services" +# +# ## Credentials for basic HTTP authentication +# # username = "admin" +# # password = "admin" +# +# ## Maximum time to receive response. 
+# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = true + + +# # Gets counters from all InfiniBand cards and ports installed +# [[inputs.infiniband]] +# # no configuration + + +# # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints +# [[inputs.influxdb]] +# ## Works with InfluxDB debug endpoints out of the box, +# ## but other services can use this format too. +# ## See the influxdb plugin's README for more details. +# +# ## Multiple URLs from which to read InfluxDB-formatted JSON +# ## Default is "http://localhost:8086/debug/vars". +# urls = [ +# "http://localhost:8086/debug/vars" +# ] +# +# ## Username and password to send using HTTP Basic Authentication. +# # username = "" +# # password = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## http request & header timeout +# timeout = "5s" + + +# # Collect statistics about itself +# [[inputs.internal]] +# ## If true, collect telegraf memory stats. +# # collect_memstats = true + + +# # Monitors internet speed using speedtest.net service +# [[inputs.internet_speed]] +# ## Sets if runs file download test +# ## Default: false +# enable_file_download = false + + +# # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs. +# [[inputs.interrupts]] +# ## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is +# ## stored as a field. +# ## +# ## The default is false for backwards compatibility, and will be changed to +# ## true in a future version. It is recommended to set to true on new +# ## deployments. 
+# # cpu_as_tag = false
+#
+# ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e.
+# # [inputs.interrupts.tagdrop]
+# # irq = [ "NET_RX", "TASKLET" ]
+
+
+# # Read metrics from the bare metal servers via IPMI
+# [[inputs.ipmi_sensor]]
+# ## optionally specify the path to the ipmitool executable
+# # path = "/usr/bin/ipmitool"
+# ##
+# ## Setting 'use_sudo' to true will make use of sudo to run ipmitool.
+# ## Sudo must be configured to allow the telegraf user to run ipmitool
+# ## without a password.
+# # use_sudo = false
+# ##
+# ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR
+# # privilege = "ADMINISTRATOR"
+# ##
+# ## optionally specify one or more servers via a url matching
+# ## [username[:password]@][protocol[(address)]]
+# ## e.g.
+# ## root:passwd@lan(127.0.0.1)
+# ##
+# ## if no servers are specified, local machine sensor stats will be queried
+# ##
+# # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
+#
+# ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid
+# ## gaps or overlap in pulled data
+# interval = "30s"
+#
+# ## Timeout for the ipmitool command to complete
+# timeout = "20s"
+#
+# ## Schema Version: (Optional, defaults to version 1)
+# metric_version = 2
+#
+# ## Optionally provide the hex key for the IPMI connection.
+# # hex_key = ""
+#
+# ## If ipmitool should use a cache
+# ## for me ipmitool runs about 2 to 10 times faster with cache enabled on HP G10 servers (when using ubuntu20.04)
+# ## the cache file may not work well for you if some sensors come up late
+# # use_cache = false
+#
+# ## Path to the ipmitools cache file (defaults to OS temp dir)
+# ## The provided path must exist and must be writable
+# # cache_path = ""
+
+
+# # Gather packets and bytes counters from Linux ipsets
+# [[inputs.ipset]]
+# ## By default, we only show sets which have already matched at least 1 packet.
+# ## set include_unmatched_sets = true to gather them all. 
+# include_unmatched_sets = false
+# ## Adjust your sudo settings appropriately if using this option ("sudo ipset save")
+# use_sudo = false
+# ## The default timeout of 1s for ipset execution can be overridden here:
+# # timeout = "1s"
+
+
+# # Read jobs and cluster metrics from Jenkins instances
+# [[inputs.jenkins]]
+# ## The Jenkins URL in the format "schema://host:port"
+# url = "http://my-jenkins-instance:8080"
+# # username = "admin"
+# # password = "admin"
+#
+# ## Set response_timeout
+# response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use SSL but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Optional Max Job Build Age filter
+# ## Default 1 hour, ignore builds older than max_build_age
+# # max_build_age = "1h"
+#
+# ## Optional Sub Job Depth filter
+# ## Jenkins can have unlimited layers of sub jobs
+# ## This config will limit the layers of pulling, default value 0 means
+# ## unlimited pulling until no more sub jobs
+# # max_subjob_depth = 0
+#
+# ## Optional Sub Job Per Layer
+# ## In workflow-multibranch-plugin, each branch will be created as a sub job.
+# ## This config will limit to call only the latest branches in each layer,
+# ## empty will use default value 10
+# # max_subjob_per_layer = 10
+#
+# ## Jobs to include or exclude from gathering
+# ## When using both lists, job_exclude has priority.
+# ## Wildcards are supported: [ "jobA/*", "jobB/subjob1/*"]
+# # job_include = [ "*" ]
+# # job_exclude = [ ]
+#
+# ## Nodes to include or exclude from gathering
+# ## When using both lists, node_exclude has priority. 
+# # node_include = [ "*" ] +# # node_exclude = [ ] +# +# ## Worker pool for jenkins plugin only +# ## Empty this field will use default value 5 +# # max_connections = 5 + + +# # Read JMX metrics through Jolokia +# [[inputs.jolokia]] +# # DEPRECATED: the jolokia plugin has been deprecated in favor of the +# # jolokia2 plugin +# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2 +# +# ## This is the context root used to compose the jolokia url +# ## NOTE that Jolokia requires a trailing slash at the end of the context root +# ## NOTE that your jolokia security policy must allow for POST requests. +# context = "/jolokia/" +# +# ## This specifies the mode used +# # mode = "proxy" +# # +# ## When in proxy mode this section is used to specify further +# ## proxy address configurations. +# ## Remember to change host address to fit your environment. +# # [inputs.jolokia.proxy] +# # host = "127.0.0.1" +# # port = "8080" +# +# ## Optional http timeouts +# ## +# ## response_header_timeout, if non-zero, specifies the amount of time to wait +# ## for a server's response headers after fully writing the request. +# # response_header_timeout = "3s" +# ## +# ## client_timeout specifies a time limit for requests made by this client. +# ## Includes connection time, any redirects, and reading the response body. +# # client_timeout = "4s" +# +# ## Attribute delimiter +# ## +# ## When multiple attributes are returned for a single +# ## [inputs.jolokia.metrics], the field name is a concatenation of the metric +# ## name, and the attribute name, separated by the given delimiter. +# # delimiter = "_" +# +# ## List of servers exposing jolokia read service +# [[inputs.jolokia.servers]] +# name = "as-server-01" +# host = "127.0.0.1" +# port = "8080" +# # username = "myuser" +# # password = "mypassword" +# +# ## List of metrics collected on above servers +# ## Each metric consists in a name, a jmx path and either +# ## a pass or drop slice attribute. 
+# ## This collect all heap memory usage metrics. +# [[inputs.jolokia.metrics]] +# name = "heap_memory_usage" +# mbean = "java.lang:type=Memory" +# attribute = "HeapMemoryUsage" +# +# ## This collect thread counts metrics. +# [[inputs.jolokia.metrics]] +# name = "thread_count" +# mbean = "java.lang:type=Threading" +# attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount" +# +# ## This collect number of class loaded/unloaded counts metrics. +# [[inputs.jolokia.metrics]] +# name = "class_count" +# mbean = "java.lang:type=ClassLoading" +# attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount" + + +# # Read JMX metrics from a Jolokia REST agent endpoint +# [[inputs.jolokia2_agent]] +# # default_tag_prefix = "" +# # default_field_prefix = "" +# # default_field_separator = "." +# +# # Add agents URLs to query +# urls = ["http://localhost:8080/jolokia"] +# # username = "" +# # password = "" +# # response_timeout = "5s" +# +# ## Optional TLS config +# # tls_ca = "/var/private/ca.pem" +# # tls_cert = "/var/private/client.pem" +# # tls_key = "/var/private/client-key.pem" +# # insecure_skip_verify = false +# +# ## Add metrics to read +# [[inputs.jolokia2_agent.metric]] +# name = "java_runtime" +# mbean = "java.lang:type=Runtime" +# paths = ["Uptime"] + + +# # Read JMX metrics from a Jolokia REST proxy endpoint +# [[inputs.jolokia2_proxy]] +# # default_tag_prefix = "" +# # default_field_prefix = "" +# # default_field_separator = "." 
+# +# ## Proxy agent +# url = "http://localhost:8080/jolokia" +# # username = "" +# # password = "" +# # response_timeout = "5s" +# +# ## Optional TLS config +# # tls_ca = "/var/private/ca.pem" +# # tls_cert = "/var/private/client.pem" +# # tls_key = "/var/private/client-key.pem" +# # insecure_skip_verify = false +# +# ## Add proxy targets to query +# # default_target_username = "" +# # default_target_password = "" +# [[inputs.jolokia2_proxy.target]] +# url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi" +# # username = "" +# # password = "" +# +# ## Add metrics to read +# [[inputs.jolokia2_proxy.metric]] +# name = "java_runtime" +# mbean = "java.lang:type=Runtime" +# paths = ["Uptime"] + + +# # Read Kapacitor-formatted JSON metrics from one or more HTTP endpoints +# [[inputs.kapacitor]] +# ## Multiple URLs from which to read Kapacitor-formatted JSON +# ## Default is "http://localhost:9092/kapacitor/v1/debug/vars". +# urls = [ +# "http://localhost:9092/kapacitor/v1/debug/vars" +# ] +# +# ## Time limit for http requests +# timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read status information from one or more Kibana servers +# [[inputs.kibana]] +# ## Specify a list of one or more Kibana servers +# servers = ["http://localhost:5601"] +# +# ## Timeout for HTTP requests +# timeout = "5s" +# +# ## HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from the Kubernetes api +# [[inputs.kube_inventory]] +# ## URL for the Kubernetes API +# url = "https://127.0.0.1" +# +# ## Namespace to 
use. Set to "" to use all namespaces.
+# # namespace = "default"
+#
+# ## Use bearer token for authorization. ('bearer_token' takes priority)
+# ## If both of these are empty, we'll use the default serviceaccount:
+# ## at: /run/secrets/kubernetes.io/serviceaccount/token
+# # bearer_token = "/path/to/bearer/token"
+# ## OR
+# # bearer_token_string = "abc_123"
+#
+# ## Set response_timeout (default 5 seconds)
+# # response_timeout = "5s"
+#
+# ## Optional Resources to exclude from gathering
+# ## Leave them blank to gather everything available.
+# ## Values can be - "daemonsets", "deployments", "endpoints", "ingress", "nodes",
+# ## "persistentvolumes", "persistentvolumeclaims", "pods", "services", "statefulsets"
+# # resource_exclude = [ "deployments", "nodes", "statefulsets" ]
+#
+# ## Optional Resources to include when gathering
+# ## Overrides resource_exclude if both set.
+# # resource_include = [ "deployments", "nodes", "statefulsets" ]
+#
+# ## selectors to include and exclude as tags. Globs accepted.
+# ## Note that an empty array for both will include all selectors as tags
+# ## selector_exclude overrides selector_include if both set.
+# # selector_include = []
+# # selector_exclude = ["*"]
+#
+# ## Optional TLS Config
+# # tls_ca = "/path/to/cafile"
+# # tls_cert = "/path/to/certfile"
+# # tls_key = "/path/to/keyfile"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from the kubernetes kubelet api
+# [[inputs.kubernetes]]
+# ## URL for the kubelet
+# url = "http://127.0.0.1:10255"
+#
+# ## Use bearer token for authorization. ('bearer_token' takes priority)
+# ## If both of these are empty, we'll use the default serviceaccount:
+# ## at: /run/secrets/kubernetes.io/serviceaccount/token
+# # bearer_token = "/path/to/bearer/token"
+# ## OR
+# # bearer_token_string = "abc_123"
+#
+# ## Pod labels to be added as tags. An empty array for both include and
+# ## exclude will include all labels. 
+# # label_include = [] +# # label_exclude = ["*"] +# +# ## Set response_timeout (default 5 seconds) +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = /path/to/cafile +# # tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from a LeoFS Server via SNMP +# [[inputs.leofs]] +# ## An array of URLs of the form: +# ## host [ ":" port] +# servers = ["127.0.0.1:4020"] + + +# # Provides Linux sysctl fs metrics +# [[inputs.linux_sysctl_fs]] +# # no configuration + + +# # Read metrics exposed by Logstash +# [[inputs.logstash]] +# ## The URL of the exposed Logstash API endpoint. +# url = "http://127.0.0.1:9600" +# +# ## Use Logstash 5 single pipeline API, set to true when monitoring +# ## Logstash 5. +# # single_pipeline = false +# +# ## Enable optional collection components. Can contain +# ## "pipelines", "process", and "jvm". +# # collect = ["pipelines", "process", "jvm"] +# +# ## Timeout for HTTP requests. +# # timeout = "5s" +# +# ## Optional HTTP Basic Auth credentials. +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config. +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Use TLS but skip chain & host verification. +# # insecure_skip_verify = false +# +# ## Optional HTTP headers. +# # [inputs.logstash.headers] +# # "X-Special-Header" = "Special-Value" + + +# # Read metrics about LVM physical volumes, volume groups, logical volumes. +# [[inputs.lvm]] +# ## Use sudo to run LVM commands +# use_sudo = false + + +# # Gathers metrics from the /3.0/reports MailChimp API +# [[inputs.mailchimp]] +# ## MailChimp API key +# ## get from https://admin.mailchimp.com/account/api/ +# api_key = "" # required +# ## Reports for campaigns sent more than days_old ago will not be collected. +# ## 0 means collect all. 
+# days_old = 0 +# ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old +# # campaign_id = "" + + +# # Retrieves information on a specific host in a MarkLogic Cluster +# [[inputs.marklogic]] +# ## Base URL of the MarkLogic HTTP Server. +# url = "http://localhost:8002" +# +# ## List of specific hostnames to retrieve information. At least (1) required. +# # hosts = ["hostname1", "hostname2"] +# +# ## Using HTTP Basic Authentication. Management API requires 'manage-user' role privileges +# # username = "myuser" +# # password = "mypassword" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from one or many mcrouter servers +# [[inputs.mcrouter]] +# ## An array of address to gather stats about. Specify an ip or hostname +# ## with port. ie tcp://localhost:11211, tcp://10.0.0.1:11211, etc. +# servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"] +# +# ## Timeout for metric collections from all servers. Minimum timeout is "1s". +# # timeout = "5s" + + +# # Read metrics from one or many memcached servers +# [[inputs.memcached]] +# ## An array of address to gather stats about. Specify an ip on hostname +# ## with optional port. ie localhost, 10.0.0.1:11211, etc. +# servers = ["localhost:11211"] +# # unix_sockets = ["/var/run/memcached.sock"] + + +# # Telegraf plugin for gathering metrics from N Mesos masters +# [[inputs.mesos]] +# ## Timeout, in ms. +# timeout = 100 +# +# ## A list of Mesos masters. +# masters = ["http://localhost:5050"] +# +# ## Master metrics groups to be collected, by default, all enabled. 
+# master_collections = [ +# "resources", +# "master", +# "system", +# "agents", +# "frameworks", +# "framework_offers", +# "tasks", +# "messages", +# "evqueue", +# "registrar", +# "allocator", +# ] +# +# ## A list of Mesos slaves, default is [] +# # slaves = [] +# +# ## Slave metrics groups to be collected, by default, all enabled. +# # slave_collections = [ +# # "resources", +# # "agent", +# # "system", +# # "executors", +# # "tasks", +# # "messages", +# # ] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Collects scores from a Minecraft server's scoreboard using the RCON protocol +# [[inputs.minecraft]] +# ## Address of the Minecraft server. +# # server = "localhost" +# +# ## Server RCON Port. +# # port = "25575" +# +# ## Server RCON Password. +# password = "" +# +# ## Uncomment to remove deprecated metric components. +# # tagdrop = ["server"] + + +# # Retrieve data from MODBUS slave devices +# [[inputs.modbus]] +# ## Connection Configuration +# ## +# ## The plugin supports connections to PLCs via MODBUS/TCP, RTU over TCP, ASCII over TCP or +# ## via serial line communication in binary (RTU) or readable (ASCII) encoding +# ## +# ## Device name +# name = "Device" +# +# ## Slave ID - addresses a MODBUS device on the bus +# ## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved] +# slave_id = 1 +# +# ## Timeout for each request +# timeout = "1s" +# +# ## Maximum number of retries and the time to wait between retries +# ## when a slave-device is busy. 
+# # busy_retries = 0 +# # busy_retries_wait = "100ms" +# +# # TCP - connect via Modbus/TCP +# controller = "tcp://localhost:502" +# +# ## Serial (RS485; RS232) +# # controller = "file:///dev/ttyUSB0" +# # baud_rate = 9600 +# # data_bits = 8 +# # parity = "N" +# # stop_bits = 1 +# +# ## For Modbus over TCP you can choose between "TCP", "RTUoverTCP" and "ASCIIoverTCP" +# ## default behaviour is "TCP" if the controller is TCP +# ## For Serial you can choose between "RTU" and "ASCII" +# # transmission_mode = "RTU" +# +# ## Measurements +# ## +# +# ## Digital Variables, Discrete Inputs and Coils +# ## measurement - the (optional) measurement name, defaults to "modbus" +# ## name - the variable name +# ## address - variable address +# +# discrete_inputs = [ +# { name = "start", address = [0]}, +# { name = "stop", address = [1]}, +# { name = "reset", address = [2]}, +# { name = "emergency_stop", address = [3]}, +# ] +# coils = [ +# { name = "motor1_run", address = [0]}, +# { name = "motor1_jog", address = [1]}, +# { name = "motor1_stop", address = [2]}, +# ] +# +# ## Analog Variables, Input Registers and Holding Registers +# ## measurement - the (optional) measurement name, defaults to "modbus" +# ## name - the variable name +# ## byte_order - the ordering of bytes +# ## |---AB, ABCD - Big Endian +# ## |---BA, DCBA - Little Endian +# ## |---BADC - Mid-Big Endian +# ## |---CDAB - Mid-Little Endian +# ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, +# ## FLOAT32-IEEE, FLOAT64-IEEE (the IEEE 754 binary representation) +# ## FLOAT32, FIXED, UFIXED (fixed-point representation on input) +# ## scale - the final numeric variable representation +# ## address - variable address +# +# holding_registers = [ +# { name = "power_factor", byte_order = "AB", data_type = "FIXED", scale=0.01, address = [8]}, +# { name = "voltage", byte_order = "AB", data_type = "FIXED", scale=0.1, address = [0]}, +# { name = "energy", byte_order = "ABCD", data_type = "FIXED", scale=0.001, 
address = [5,6]}, +# { name = "current", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [1,2]}, +# { name = "frequency", byte_order = "AB", data_type = "UFIXED", scale=0.1, address = [7]}, +# { name = "power", byte_order = "ABCD", data_type = "UFIXED", scale=0.1, address = [3,4]}, +# ] +# input_registers = [ +# { name = "tank_level", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]}, +# { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]}, +# { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]}, +# ] + + +# # Read metrics from one or many MongoDB servers +# [[inputs.mongodb]] +# ## An array of URLs of the form: +# ## "mongodb://" [user ":" pass "@"] host [ ":" port] +# ## For example: +# ## mongodb://user:auth_key@10.10.3.30:27017, +# ## mongodb://10.10.3.33:18832, +# servers = ["mongodb://127.0.0.1:27017"] +# +# ## When true, collect cluster status +# ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which +# ## may have an impact on performance. +# # gather_cluster_status = true +# +# ## When true, collect per database stats +# # gather_perdb_stats = false +# +# ## When true, collect per collection stats +# # gather_col_stats = false +# +# ## When true, collect usage statistics for each collection +# ## (insert, update, queries, remove, getmore, commands etc...). 
+# # gather_top_stat = false +# +# ## List of db where collections stats are collected +# ## If empty, all db are concerned +# # col_stats_dbs = ["local"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics and status information about processes managed by Monit +# [[inputs.monit]] +# ## Monit HTTPD address +# address = "http://127.0.0.1:2812" +# +# ## Username and Password for Monit +# # username = "" +# # password = "" +# +# ## Amount of time allowed to complete the HTTP request +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Aggregates the contents of multiple files into a single point +# [[inputs.multifile]] +# ## Base directory where telegraf will look for files. +# ## Omit this option to use absolute paths. +# base_dir = "/sys/bus/i2c/devices/1-0076/iio:device0" +# +# ## If true, Telegraf discard all data when a single file can't be read. +# ## Else, Telegraf omits the field generated from this file. +# # fail_early = true +# +# ## Files to parse each interval. 
+# [[inputs.multifile.file]] +# file = "in_pressure_input" +# dest = "pressure" +# conversion = "float" +# [[inputs.multifile.file]] +# file = "in_temp_input" +# dest = "temperature" +# conversion = "float(3)" +# [[inputs.multifile.file]] +# file = "in_humidityrelative_input" +# dest = "humidityrelative" +# conversion = "float(3)" + + +# # Read metrics from one or many mysql servers +# [[inputs.mysql]] +# ## specify servers via a url matching: +# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]] +# ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name +# ## e.g. +# ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"] +# ## servers = ["user@tcp(127.0.0.1:3306)/?tls=false"] +# # +# ## If no servers are specified, then localhost is used as the host. +# servers = ["tcp(127.0.0.1:3306)/"] +# +# ## Selects the metric output format. +# ## +# ## This option exists to maintain backwards compatibility, if you have +# ## existing metrics do not set or change this value until you are ready to +# ## migrate to the new format. +# ## +# ## If you do not have existing metrics from this plugin set to the latest +# ## version. 
+# ## +# ## Telegraf >=1.6: metric_version = 2 +# ## <1.6: metric_version = 1 (or unset) +# metric_version = 2 +# +# ## if the list is empty, then metrics are gathered from all database tables +# # table_schema_databases = [] +# +# ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above list +# # gather_table_schema = false +# +# ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST +# # gather_process_list = false +# +# ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS +# # gather_user_statistics = false +# +# ## gather auto_increment columns and max values from information schema +# # gather_info_schema_auto_inc = false +# +# ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS +# # gather_innodb_metrics = false +# +# ## gather metrics from SHOW SLAVE STATUS command output +# # gather_slave_status = false +# +# ## gather metrics from all channels from SHOW SLAVE STATUS command output +# # gather_all_slave_channels = false +# +# ## use MariaDB dialect for all channels SHOW SLAVE STATUS +# # mariadb_dialect = false +# +# ## gather metrics from SHOW BINARY LOGS command output +# # gather_binary_logs = false +# +# ## gather metrics from PERFORMANCE_SCHEMA.GLOBAL_VARIABLES +# # gather_global_variables = true +# +# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE +# # gather_table_io_waits = false +# +# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS +# # gather_table_lock_waits = false +# +# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE +# # gather_index_io_waits = false +# +# ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS +# # gather_event_waits = false +# +# ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME +# # gather_file_events_stats = false +# +# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST +# # gather_perf_events_statements = false +# +# ## the limits for metrics form 
perf_events_statements +# # perf_events_statements_digest_text_limit = 120 +# # perf_events_statements_limit = 250 +# # perf_events_statements_time_limit = 86400 +# +# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME +# # gather_perf_sum_per_acc_per_event = false +# +# ## list of events to be gathered for gather_perf_sum_per_acc_per_event +# ## in case of empty list all events will be gathered +# # perf_summary_events = [] +# +# ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES) +# ## example: interval_slow = "30m" +# # interval_slow = "" +# +# ## Optional TLS Config (will be used if tls=custom parameter specified in server uri) +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Provides metrics about the state of a NATS server +# [[inputs.nats]] +# ## The address of the monitoring endpoint of the NATS server +# server = "http://localhost:8222" +# +# ## Maximum time to receive response +# # response_timeout = "5s" + + +# # Neptune Apex data collector +# [[inputs.neptune_apex]] +# ## The Neptune Apex plugin reads the publicly available status.xml data from a local Apex. +# ## Measurements will be logged under "apex". +# +# ## The base URL of the local Apex(es). If you specify more than one server, they will +# ## be differentiated by the "source" tag. +# servers = [ +# "http://apex.local", +# ] +# +# ## The response_timeout specifies how long to wait for a reply from the Apex. +# #response_timeout = "5s" + + +# # Read metrics about network interface usage +# [[inputs.net]] +# ## By default, telegraf gathers stats from any up interface (excluding loopback) +# ## Setting interfaces will tell it to gather these explicit interfaces, +# ## regardless of status. 
+# ##
+# # interfaces = ["eth0"]
+# ##
+# ## On linux systems telegraf also collects protocol stats.
+# ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics.
+# ##
+# # ignore_protocol_stats = false
+# ##
+
+
+# # Collect response time of a TCP or UDP connection
+# [[inputs.net_response]]
+# ## Protocol, must be "tcp" or "udp"
+# ## NOTE: because the "udp" protocol does not respond to requests, it requires
+# ## a send/expect string pair (see below).
+# protocol = "tcp"
+# ## Server address (default localhost)
+# address = "localhost:80"
+#
+# ## Set timeout
+# # timeout = "1s"
+#
+# ## Set read timeout (only used if expecting a response)
+# # read_timeout = "1s"
+#
+# ## The following options are required for UDP checks. For TCP, they are
+# ## optional. The plugin will send the given string to the server and then
+# ## expect to receive the given 'expect' string back.
+# ## string sent to the server
+# # send = "ssh"
+# ## expected string in answer
+# # expect = "ssh"
+#
+# ## Uncomment to remove deprecated fields
+# # fielddrop = ["result_type", "string_found"]
+
+
+# # Read TCP metrics such as established, time wait and sockets counts.
+# [[inputs.netstat]]
+# # no configuration
+
+
+# # Read per-mount NFS client metrics from /proc/self/mountstats
+# [[inputs.nfsclient]]
+# ## Read more low-level metrics (optional, defaults to false)
+# # fullstat = false
+#
+# ## List of mounts to explicitly include or exclude (optional)
+# ## The pattern (Go regexp) is matched against the mount point (not the
+# ## device being mounted). If include_mounts is set, all mounts are ignored
+# ## unless present in the list. If a mount is listed in both include_mounts
+# ## and exclude_mounts, it is excluded. Go regexp patterns can be used.
+# # include_mounts = []
+# # exclude_mounts = []
+#
+# ## List of operations to include or exclude from collecting. This applies
+# ## only when fullstat=true.
Semantics are similar to {include,exclude}_mounts:
+# ## the default is to collect everything; when include_operations is set, only
+# ## those OPs are collected; when exclude_operations is set, all are collected
+# ## except those listed. If include and exclude are set, the OP is excluded.
+# ## See /proc/self/mountstats for a list of valid operations; note that
+# ## NFSv3 and NFSv4 have different lists. While it is not possible to
+# ## have different include/exclude lists for NFSv3/4, unused elements
+# ## in the list should be okay. It is possible to have different lists
+# ## for different mountpoints: use multiple [[inputs.nfsclient]] stanzas,
+# ## with their own lists. See "include_mounts" above, and be careful of
+# ## duplicate metrics.
+# # include_operations = []
+# # exclude_operations = []
+
+
+# # Read Nginx's basic status information (ngx_http_stub_status_module)
+# [[inputs.nginx]]
+# # An array of Nginx stub_status URI to gather stats.
+# urls = ["http://localhost/server_status"]
+#
+# ## Optional TLS Config
+# tls_ca = "/etc/telegraf/ca.pem"
+# tls_cert = "/etc/telegraf/cert.cer"
+# tls_key = "/etc/telegraf/key.key"
+# ## Use TLS but skip chain & host verification
+# insecure_skip_verify = false
+#
+# # HTTP response timeout (default: 5s)
+# response_timeout = "5s"
+
+
+# # Read Nginx Plus' full status information (ngx_http_status_module)
+# [[inputs.nginx_plus]]
+# ## An array of ngx_http_status_module or status URI to gather stats.
+# urls = ["http://localhost/status"]
+#
+# # HTTP response timeout (default: 5s)
+# response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read Nginx Plus Api documentation
+# [[inputs.nginx_plus_api]]
+# ## An array of API URI to gather stats.
+# urls = ["http://localhost/api"] +# +# # Nginx API version, default: 3 +# # api_version = 3 +# +# # HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read Nginx virtual host traffic status module information (nginx-module-sts) +# [[inputs.nginx_sts]] +# ## An array of ngx_http_status_module or status URI to gather stats. +# urls = ["http://localhost/status"] +# +# ## HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module) +# [[inputs.nginx_upstream_check]] +# ## An URL where Nginx Upstream check module is enabled +# ## It should be set to return a JSON formatted response +# url = "http://127.0.0.1/status?format=json" +# +# ## HTTP method +# # method = "GET" +# +# ## Optional HTTP headers +# # headers = {"X-Special-Header" = "Special-Value"} +# +# ## Override HTTP "Host" header +# # host_header = "check.example.com" +# +# ## Timeout for HTTP requests +# timeout = "5s" +# +# ## Optional HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read Nginx virtual host traffic status module information (nginx-module-vts) +# [[inputs.nginx_vts]] +# ## An array of ngx_http_status_module or status URI to gather stats. 
+# urls = ["http://localhost/status"] +# +# ## HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # A plugin to collect stats from the NSD authoritative DNS name server +# [[inputs.nsd]] +# ## Address of server to connect to, optionally ':port'. Defaults to the +# ## address in the nsd config file. +# server = "127.0.0.1:8953" +# +# ## If running as a restricted user you can prepend sudo for additional access: +# # use_sudo = false +# +# ## The default location of the nsd-control binary can be overridden with: +# # binary = "/usr/sbin/nsd-control" +# +# ## The default location of the nsd config file can be overridden with: +# # config_file = "/etc/nsd/nsd.conf" +# +# ## The default timeout of 1s can be overridden with: +# # timeout = "1s" + + +# # Read NSQ topic and channel statistics. +# [[inputs.nsq]] +# ## An array of NSQD HTTP API endpoints +# endpoints = ["http://localhost:4151"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Collect kernel snmp counters and network interface statistics +# [[inputs.nstat]] +# ## file paths for proc files. If empty default paths will be used: +# ## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6 +# ## These can also be overridden with env variables, see README. +# proc_net_netstat = "/proc/net/netstat" +# proc_net_snmp = "/proc/net/snmp" +# proc_net_snmp6 = "/proc/net/snmp6" +# ## dump metrics with 0 values too +# dump_zeros = true + + +# # Get standard NTP query metrics, requires ntpq executable. +# [[inputs.ntpq]] +# ## If false, set the -n ntpq flag. Can reduce metric gather time. 
+# dns_lookup = true + + +# # Pulls statistics from nvidia GPUs attached to the host +# [[inputs.nvidia_smi]] +# ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath +# # bin_path = "/usr/bin/nvidia-smi" +# +# ## Optional: timeout for GPU polling +# # timeout = "5s" + + +# # Retrieve data from OPCUA devices +# [[inputs.opcua]] +# ## Metric name +# # name = "opcua" +# # +# ## OPC UA Endpoint URL +# # endpoint = "opc.tcp://localhost:4840" +# # +# ## Maximum time allowed to establish a connect to the endpoint. +# # connect_timeout = "10s" +# # +# ## Maximum time allowed for a request over the estabilished connection. +# # request_timeout = "5s" +# # +# ## Security policy, one of "None", "Basic128Rsa15", "Basic256", +# ## "Basic256Sha256", or "auto" +# # security_policy = "auto" +# # +# ## Security mode, one of "None", "Sign", "SignAndEncrypt", or "auto" +# # security_mode = "auto" +# # +# ## Path to cert.pem. Required when security mode or policy isn't "None". +# ## If cert path is not supplied, self-signed cert and key will be generated. +# # certificate = "/etc/telegraf/cert.pem" +# # +# ## Path to private key.pem. Required when security mode or policy isn't "None". +# ## If key path is not supplied, self-signed cert and key will be generated. +# # private_key = "/etc/telegraf/key.pem" +# # +# ## Authentication Method, one of "Certificate", "UserName", or "Anonymous". To +# ## authenticate using a specific ID, select 'Certificate' or 'UserName' +# # auth_method = "Anonymous" +# # +# ## Username. Required for auth_method = "UserName" +# # username = "" +# # +# ## Password. Required for auth_method = "UserName" +# # password = "" +# # +# ## Option to select the metric timestamp to use. 
Valid options are: +# ## "gather" -- uses the time of receiving the data in telegraf +# ## "server" -- uses the timestamp provided by the server +# ## "source" -- uses the timestamp provided by the source +# # timestamp = "gather" +# # +# ## Node ID configuration +# ## name - field name to use in the output +# ## namespace - OPC UA namespace of the node (integer value 0 thru 3) +# ## identifier_type - OPC UA ID type (s=string, i=numeric, g=guid, b=opaque) +# ## identifier - OPC UA ID (tag as shown in opcua browser) +# ## Example: +# ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262"} +# # nodes = [ +# # {name="", namespace="", identifier_type="", identifier=""}, +# # {name="", namespace="", identifier_type="", identifier=""}, +# #] +# # +# ## Node Group +# ## Sets defaults for OPC UA namespace and ID type so they aren't required in +# ## every node. A group can also have a metric name that overrides the main +# ## plugin metric name. +# ## +# ## Multiple node groups are allowed +# #[[inputs.opcua.group]] +# ## Group Metric name. Overrides the top level name. If unset, the +# ## top level name is used. +# # name = +# # +# ## Group default namespace. If a node in the group doesn't set its +# ## namespace, this is used. +# # namespace = +# # +# ## Group default identifier type. If a node in the group doesn't set its +# ## namespace, this is used. +# # identifier_type = +# # +# ## Node ID Configuration. Array of nodes with the same settings as above. +# # nodes = [ +# # {name="", namespace="", identifier_type="", identifier=""}, +# # {name="", namespace="", identifier_type="", identifier=""}, +# #] + + +# # OpenLDAP cn=Monitor plugin +# [[inputs.openldap]] +# host = "localhost" +# port = 389 +# +# # ldaps, starttls, or no encryption. default is an empty string, disabling all encryption. 
+# # note that port will likely need to be changed to 636 for ldaps
+# # valid options: "" | "starttls" | "ldaps"
+# tls = ""
+#
+# # skip peer certificate verification. Default is false.
+# insecure_skip_verify = false
+#
+# # Path to PEM-encoded Root certificate to use to verify server certificate
+# tls_ca = "/etc/ssl/certs.pem"
+#
+# # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed.
+# bind_dn = ""
+# bind_password = ""
+#
+# # Reverse metric names so they sort more naturally. Recommended.
+# # This defaults to false if unset, but is set to true when generating a new config
+# reverse_metric_names = true
+
+
+# # Get standard NTP query metrics from OpenNTPD.
+# [[inputs.openntpd]]
+# ## Run ntpctl binary with sudo.
+# # use_sudo = false
+#
+# ## Location of the ntpctl binary.
+# # binary = "/usr/sbin/ntpctl"
+#
+# ## Maximum time the ntpctl binary is allowed to run.
+# # timeout = "5ms"
+
+
+# # A plugin to collect stats from OpenSMTPD - a FREE implementation of the server-side SMTP protocol
+# [[inputs.opensmtpd]]
+# ## If running as a restricted user you can prepend sudo for additional access:
+# #use_sudo = false
+#
+# ## The default location of the smtpctl binary can be overridden with:
+# binary = "/usr/sbin/smtpctl"
+#
+# ## The default timeout of 1000ms can be overridden with (in milliseconds):
+# timeout = 1000
+
+
+# # Read current weather and forecasts data from openweathermap.org
+# [[inputs.openweathermap]]
+# ## OpenWeatherMap API key.
+# app_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+#
+# ## City ID's to collect weather data from.
+# city_id = ["5391959"]
+#
+# ## Language of the description field. Can be one of "ar", "bg",
+# ## "ca", "cz", "de", "el", "en", "fa", "fi", "fr", "gl", "hr", "hu",
+# ## "it", "ja", "kr", "la", "lt", "mk", "nl", "pl", "pt", "ro", "ru",
+# ## "se", "sk", "sl", "es", "tr", "ua", "vi", "zh_cn", "zh_tw"
+# # lang = "en"
+#
+# ## APIs to fetch; can contain "weather" or "forecast".
+# fetch = ["weather", "forecast"] +# +# ## OpenWeatherMap base URL +# # base_url = "https://api.openweathermap.org/" +# +# ## Timeout for HTTP response. +# # response_timeout = "5s" +# +# ## Preferred unit system for temperature and wind speed. Can be one of +# ## "metric", "imperial", or "standard". +# # units = "metric" +# +# ## Query interval; OpenWeatherMap updates their weather data every 10 +# ## minutes. +# interval = "10m" + + +# # Read metrics of passenger using passenger-status +# [[inputs.passenger]] +# ## Path of passenger-status. +# ## +# ## Plugin gather metric via parsing XML output of passenger-status +# ## More information about the tool: +# ## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html +# ## +# ## If no path is specified, then the plugin simply execute passenger-status +# ## hopefully it can be found in your PATH +# command = "passenger-status -v --show=xml" + + +# # Gather counters from PF +# [[inputs.pf]] +# ## PF require root access on most systems. +# ## Setting 'use_sudo' to true will make use of sudo to run pfctl. +# ## Users must configure sudo to allow telegraf user to run pfctl with no password. +# ## pfctl can be restricted to only list command "pfctl -s info". +# use_sudo = false + + +# # Read metrics of phpfpm, via HTTP status page or socket +# [[inputs.phpfpm]] +# ## An array of addresses to gather stats about. 
Specify an ip or hostname +# ## with optional port and path +# ## +# ## Plugin can be configured in three modes (either can be used): +# ## - http: the URL must start with http:// or https://, ie: +# ## "http://localhost/status" +# ## "http://192.168.130.1/status?full" +# ## +# ## - unixsocket: path to fpm socket, ie: +# ## "/var/run/php5-fpm.sock" +# ## or using a custom fpm status path: +# ## "/var/run/php5-fpm.sock:fpm-custom-status-path" +# ## +# ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie: +# ## "fcgi://10.0.0.12:9000/status" +# ## "cgi://10.0.10.12:9001/status" +# ## +# ## Example of multiple gathering from local socket and remote host +# ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"] +# urls = ["http://localhost/status"] +# +# ## Duration allowed to complete HTTP requests. +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Ping given url(s) and return statistics +# [[inputs.ping]] +# ## Hosts to send ping packets to. +# urls = ["example.org"] +# +# ## Method used for sending pings, can be either "exec" or "native". When set +# ## to "exec" the systems ping command will be executed. When set to "native" +# ## the plugin will send pings directly. +# ## +# ## While the default is "exec" for backwards compatibility, new deployments +# ## are encouraged to use the "native" method for improved compatibility and +# ## performance. +# # method = "exec" +# +# ## Number of ping packets to send per interval. Corresponds to the "-c" +# ## option of the ping command. +# # count = 1 +# +# ## Time to wait between sending ping packets in seconds. Operates like the +# ## "-i" option of the ping command. +# # ping_interval = 1.0 +# +# ## If set, the time to wait for a ping response in seconds. 
Operates like +# ## the "-W" option of the ping command. +# # timeout = 1.0 +# +# ## If set, the total ping deadline, in seconds. Operates like the -w option +# ## of the ping command. +# # deadline = 10 +# +# ## Interface or source address to send ping from. Operates like the -I or -S +# ## option of the ping command. +# # interface = "" +# +# ## Percentiles to calculate. This only works with the native method. +# # percentiles = [50, 95, 99] +# +# ## Specify the ping executable binary. +# # binary = "ping" +# +# ## Arguments for ping command. When arguments is not empty, the command from +# ## the binary option will be used and other options (ping_interval, timeout, +# ## etc) will be ignored. +# # arguments = ["-c", "3"] +# +# ## Use only IPv6 addresses when resolving a hostname. +# # ipv6 = false +# +# ## Number of data bytes to be sent. Corresponds to the "-s" +# ## option of the ping command. This only works with the native method. +# # size = 56 + + +# # Read metrics from one or many PowerDNS servers +# [[inputs.powerdns]] +# ## An array of sockets to gather stats about. +# ## Specify a path to unix socket. +# unix_sockets = ["/var/run/pdns.controlsocket"] + + +# # Read metrics from one or many PowerDNS Recursor servers +# [[inputs.powerdns_recursor]] +# ## Path to the Recursor control socket. +# unix_sockets = ["/var/run/pdns_recursor.controlsocket"] +# +# ## Directory to create receive socket. This default is likely not writable, +# ## please reference the full plugin documentation for a recommended setup. +# # socket_dir = "/var/run/" +# ## Socket permissions for the receive socket. 
+# # socket_mode = "0666" + + +# # Monitor process cpu and memory usage +# [[inputs.procstat]] +# ## PID file to monitor process +# pid_file = "/var/run/nginx.pid" +# ## executable name (ie, pgrep ) +# # exe = "nginx" +# ## pattern as argument for pgrep (ie, pgrep -f ) +# # pattern = "nginx" +# ## user as argument for pgrep (ie, pgrep -u ) +# # user = "nginx" +# ## Systemd unit name, supports globs when include_systemd_children is set to true +# # systemd_unit = "nginx.service" +# # include_systemd_children = false +# ## CGroup name or path, supports globs +# # cgroup = "systemd/system.slice/nginx.service" +# +# ## Windows service name +# # win_service = "" +# +# ## override for process_name +# ## This is optional; default is sourced from /proc//status +# # process_name = "bar" +# +# ## Field name prefix +# # prefix = "" +# +# ## When true add the full cmdline as a tag. +# # cmdline_tag = false +# +# ## Mode to use when calculating CPU usage. Can be one of 'solaris' or 'irix'. +# # mode = "irix" +# +# ## Add the PID as a tag instead of as a field. When collecting multiple +# ## processes with otherwise matching tags this setting should be enabled to +# ## ensure each process has a unique identity. +# ## +# ## Enabling this option may result in a large number of series, especially +# ## when processes have a short lifetime. +# # pid_tag = false +# +# ## Method to use when finding process IDs. Can be one of 'pgrep', or +# ## 'native'. The pgrep finder calls the pgrep executable in the PATH while +# ## the native finder performs the search directly in a manor dependent on the +# ## platform. Default is 'pgrep' +# # pid_finder = "pgrep" + + +# # Provides metrics from Proxmox nodes (Proxmox Virtual Environment > 6.2). +# [[inputs.proxmox]] +# ## API connection configuration. The API token was introduced in Proxmox v6.2. Required permissions for user and token: PVEAuditor role on /. 
+# base_url = "https://localhost:8006/api2/json" +# api_token = "USER@REALM!TOKENID=UUID" +# ## Node name, defaults to OS hostname +# # node_name = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# insecure_skip_verify = false +# +# # HTTP response timeout (default: 5s) +# response_timeout = "5s" + + +# # Reads last_run_summary.yaml file and converts to measurements +# [[inputs.puppetagent]] +# ## Location of puppet last run summary file +# location = "/var/lib/puppet/state/last_run_summary.yaml" + + +# # Reads metrics from RabbitMQ servers via the Management Plugin +# [[inputs.rabbitmq]] +# ## Management Plugin url. (default: http://localhost:15672) +# # url = "http://localhost:15672" +# ## Tag added to rabbitmq_overview series; deprecated: use tags +# # name = "rmq-server-1" +# ## Credentials +# # username = "guest" +# # password = "guest" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional request timeouts +# ## +# ## ResponseHeaderTimeout, if non-zero, specifies the amount of time to wait +# ## for a server's response headers after fully writing the request. +# # header_timeout = "3s" +# ## +# ## client_timeout specifies a time limit for requests made by this client. +# ## Includes connection time, any redirects, and reading the response body. +# # client_timeout = "4s" +# +# ## A list of nodes to gather as the rabbitmq_node measurement. If not +# ## specified, metrics for all nodes are gathered. +# # nodes = ["rabbit@node1", "rabbit@node2"] +# +# ## A list of queues to gather as the rabbitmq_queue measurement. If not +# ## specified, metrics for all queues are gathered. 
+# # queues = ["telegraf"] +# +# ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not +# ## specified, metrics for all exchanges are gathered. +# # exchanges = ["telegraf"] +# +# ## Metrics to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all metrics +# ## Currently the following metrics are supported: "exchange", "federation", "node", "overview", "queue" +# # metric_include = [] +# # metric_exclude = [] +# +# ## Queues to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all queues +# queue_name_include = [] +# queue_name_exclude = [] +# +# ## Federation upstreams include and exclude when gathering the rabbitmq_federation measurement. +# ## If neither are specified, metrics for all federation upstreams are gathered. +# ## Federation link metrics will only be gathered for queues and exchanges +# ## whose non-federation metrics will be collected (e.g a queue excluded +# ## by the 'queue_name_exclude' option will also be excluded from federation). +# ## Globs accepted. +# # federation_upstream_include = ["dataCentre-*"] +# # federation_upstream_exclude = [] + + +# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers) +# [[inputs.raindrops]] +# ## An array of raindrops middleware URI to gather stats. 
+# urls = ["http://localhost:8080/_raindrops"] + + +# # Reads metrics from RavenDB servers via the Monitoring Endpoints +# [[inputs.ravendb]] +# ## Node URL and port that RavenDB is listening on +# url = "https://localhost:8080" +# +# ## RavenDB X509 client certificate setup +# # tls_cert = "/etc/telegraf/raven.crt" +# # tls_key = "/etc/telegraf/raven.key" +# +# ## Optional request timeout +# ## +# ## Timeout, specifies the amount of time to wait +# ## for a server's response headers after fully writing the request and +# ## time limit for requests made by this client +# # timeout = "5s" +# +# ## List of statistics which are collected +# # At least one is required +# # Allowed values: server, databases, indexes, collections +# # +# # stats_include = ["server", "databases", "indexes", "collections"] +# +# ## List of db where database stats are collected +# ## If empty, all db are concerned +# # db_stats_dbs = [] +# +# ## List of db where index status are collected +# ## If empty, all indexes from all db are concerned +# # index_stats_dbs = [] +# +# ## List of db where collection status are collected +# ## If empty, all collections from all db are concerned +# # collection_stats_dbs = [] + + +# # Read CPU, Fans, Powersupply and Voltage metrics of hardware server through redfish APIs +# [[inputs.redfish]] +# ## Server url +# address = "https://127.0.0.1:5000" +# +# ## Username, Password for hardware server +# username = "root" +# password = "password123456" +# +# ## ComputerSystemId +# computer_system_id="2M220100SL" +# +# ## Amount of time allowed to complete the HTTP request +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from one or many redis servers +# [[inputs.redis]] +# ## specify servers via a url matching: +# ## 
[protocol://][:password]@address[:port] +# ## e.g. +# ## tcp://localhost:6379 +# ## tcp://:password@192.168.99.100 +# ## unix:///var/run/redis.sock +# ## +# ## If no servers are specified, then localhost is used as the host. +# ## If no port is specified, 6379 is used +# servers = ["tcp://localhost:6379"] +# +# ## Optional. Specify redis commands to retrieve values +# # [[inputs.redis.commands]] +# # # The command to run where each argument is a separate element +# # command = ["get", "sample-key"] +# # # The field to store the result in +# # field = "sample-key-value" +# # # The type of the result +# # # Can be "string", "integer", or "float" +# # type = "string" +# +# ## specify server password +# # password = "s#cr@t%" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = true + + +# # Read metrics from one or many RethinkDB servers +# [[inputs.rethinkdb]] +# ## An array of URI to gather stats about. Specify an ip or hostname +# ## with optional port add password. ie, +# ## rethinkdb://user:auth_key@10.10.3.30:28105, +# ## rethinkdb://10.10.3.33:18832, +# ## 10.0.0.1:10000, etc. +# servers = ["127.0.0.1:28015"] +# ## +# ## If you use actual rethinkdb of > 2.3.0 with username/password authorization, +# ## protocol have to be named "rethinkdb2" - it will use 1_0 H. +# # servers = ["rethinkdb2://username:password@127.0.0.1:28015"] +# ## +# ## If you use older versions of rethinkdb (<2.2) with auth_key, protocol +# ## have to be named "rethinkdb". 
+# # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"] + + +# # Read metrics one or many Riak servers +# [[inputs.riak]] +# # Specify a list of one or more riak http servers +# servers = ["http://localhost:8098"] + + +# # Read API usage and limits for a Salesforce organisation +# [[inputs.salesforce]] +# ## specify your credentials +# ## +# username = "your_username" +# password = "your_password" +# ## +# ## (optional) security token +# # security_token = "your_security_token" +# ## +# ## (optional) environment type (sandbox or production) +# ## default is: production +# ## +# # environment = "production" +# ## +# ## (optional) API version (default: "39.0") +# ## +# # version = "39.0" + + +# # Read metrics from storage devices supporting S.M.A.R.T. +# [[inputs.smart]] +# ## Optionally specify the path to the smartctl executable +# # path_smartctl = "/usr/bin/smartctl" +# +# ## Optionally specify the path to the nvme-cli executable +# # path_nvme = "/usr/bin/nvme" +# +# ## Optionally specify if vendor specific attributes should be propagated for NVMe disk case +# ## ["auto-on"] - automatically find and enable additional vendor specific disk info +# ## ["vendor1", "vendor2", ...] - e.g. "Intel" enable additional Intel specific disk info +# # enable_extensions = ["auto-on"] +# +# ## On most platforms used cli utilities requires root access. +# ## Setting 'use_sudo' to true will make use of sudo to run smartctl or nvme-cli. +# ## Sudo must be configured to allow the telegraf user to run smartctl or nvme-cli +# ## without a password. +# # use_sudo = false +# +# ## Skip checking disks in this power mode. Defaults to +# ## "standby" to not wake up disks that have stopped rotating. +# ## See --nocheck in the man pages for smartctl. +# ## smartctl version 5.41 and 5.42 have faulty detection of +# ## power mode and might require changing this value to +# ## "never" depending on your disks. +# # nocheck = "standby" +# +# ## Gather all returned S.M.A.R.T. 
attribute metrics and the detailed +# ## information from each drive into the 'smart_attribute' measurement. +# # attributes = false +# +# ## Optionally specify devices to exclude from reporting if disks auto-discovery is performed. +# # excludes = [ "/dev/pass6" ] +# +# ## Optionally specify devices and device type, if unset +# ## a scan (smartctl --scan and smartctl --scan -d nvme) for S.M.A.R.T. devices will be done +# ## and all found will be included except for the excluded in excludes. +# # devices = [ "/dev/ada0 -d atacam", "/dev/nvme0"] +# +# ## Timeout for the cli command to complete. +# # timeout = "30s" + + +# # Retrieves SNMP values from remote agents +# [[inputs.snmp]] +# ## Agent addresses to retrieve values from. +# ## format: agents = [":"] +# ## scheme: optional, either udp, udp4, udp6, tcp, tcp4, tcp6. +# ## default is udp +# ## port: optional +# ## example: agents = ["udp://127.0.0.1:161"] +# ## agents = ["tcp://127.0.0.1:161"] +# ## agents = ["udp4://v4only-snmp-agent"] +# agents = ["udp://127.0.0.1:161"] +# +# ## Timeout for each request. +# # timeout = "5s" +# +# ## SNMP version; can be 1, 2, or 3. +# # version = 2 +# +# ## Agent host tag; the tag used to reference the source host +# # agent_host_tag = "agent_host" +# +# ## SNMP community string. +# # community = "public" +# +# ## Number of retries to attempt. +# # retries = 3 +# +# ## The GETBULK max-repetitions parameter. +# # max_repetitions = 10 +# +# ## SNMPv3 authentication and encryption options. +# ## +# ## Security Name. +# # sec_name = "myuser" +# ## Authentication protocol; one of "MD5", "SHA", "SHA224", "SHA256", "SHA384", "SHA512" or "". +# # auth_protocol = "MD5" +# ## Authentication password. +# # auth_password = "pass" +# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". +# # sec_level = "authNoPriv" +# ## Context Name. +# # context_name = "" +# ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "". 
+# # priv_protocol = "" +# ## Privacy password used for encrypted messages. +# # priv_password = "" +# +# ## Add fields and tables defining the variables you wish to collect. This +# ## example collects the system uptime and interface variables. Reference the +# ## full plugin documentation for configuration details. + + +# # DEPRECATED! PLEASE USE inputs.snmp INSTEAD. +# [[inputs.snmp_legacy]] +# ## Use 'oids.txt' file to translate oids to names +# ## To generate 'oids.txt' you need to run: +# ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt +# ## Or if you have an other MIB folder with custom MIBs +# ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt +# snmptranslate_file = "/tmp/oids.txt" +# [[inputs.snmp.host]] +# address = "192.168.2.2:161" +# # SNMP community +# community = "public" # default public +# # SNMP version (1, 2 or 3) +# # Version 3 not supported yet +# version = 2 # default 2 +# # SNMP response timeout +# timeout = 2.0 # default 2.0 +# # SNMP request retries +# retries = 2 # default 2 +# # Which get/bulk do you want to collect for this host +# collect = ["mybulk", "sysservices", "sysdescr"] +# # Simple list of OIDs to get, in addition to "collect" +# get_oids = [] +# +# [[inputs.snmp.host]] +# address = "192.168.2.3:161" +# community = "public" +# version = 2 +# timeout = 2.0 +# retries = 2 +# collect = ["mybulk"] +# get_oids = [ +# "ifNumber", +# ".1.3.6.1.2.1.1.3.0", +# ] +# +# [[inputs.snmp.get]] +# name = "ifnumber" +# oid = "ifNumber" +# +# [[inputs.snmp.get]] +# name = "interface_speed" +# oid = "ifSpeed" +# instance = "0" +# +# [[inputs.snmp.get]] +# name = "sysuptime" +# oid = ".1.3.6.1.2.1.1.3.0" +# unit = "second" +# +# [[inputs.snmp.bulk]] +# name = "mybulk" +# max_repetition = 127 +# oid = ".1.3.6.1.2.1.1" +# +# [[inputs.snmp.bulk]] +# name = "ifoutoctets" +# max_repetition = 127 +# oid = "ifOutOctets" +# +# [[inputs.snmp.host]] +# address = "192.168.2.13:161" +# #address = 
"127.0.0.1:161" +# community = "public" +# version = 2 +# timeout = 2.0 +# retries = 2 +# #collect = ["mybulk", "sysservices", "sysdescr", "systype"] +# collect = ["sysuptime" ] +# [[inputs.snmp.host.table]] +# name = "iftable3" +# include_instances = ["enp5s0", "eth1"] +# +# # SNMP TABLEs +# # table without mapping neither subtables +# [[inputs.snmp.table]] +# name = "iftable1" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# +# # table without mapping but with subtables +# [[inputs.snmp.table]] +# name = "iftable2" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# sub_tables = [".1.3.6.1.2.1.2.2.1.13"] +# +# # table with mapping but without subtables +# [[inputs.snmp.table]] +# name = "iftable3" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# # if empty. get all instances +# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" +# # if empty, get all subtables +# +# # table with both mapping and subtables +# [[inputs.snmp.table]] +# name = "iftable4" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# # if empty get all instances +# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" +# # if empty get all subtables +# # sub_tables could be not "real subtables" +# sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"] + + +# # Read stats from one or more Solr servers or cores +# [[inputs.solr]] +# ## specify a list of one or more Solr servers +# servers = ["http://localhost:8983"] +# +# ## specify a list of one or more Solr cores (default - all) +# # cores = ["main"] +# +# ## Optional HTTP Basic Auth Credentials +# # username = "username" +# # password = "pa$$word" + + +# # Gather timeseries from Google Cloud Platform v3 monitoring API +# [[inputs.stackdriver]] +# ## GCP Project +# project = "erudite-bloom-151019" +# +# ## Include timeseries that start with the given metric type. +# metric_type_prefix_include = [ +# "compute.googleapis.com/", +# ] +# +# ## Exclude timeseries that start with the given metric type. 
+# # metric_type_prefix_exclude = [] +# +# ## Many metrics are updated once per minute; it is recommended to override +# ## the agent level interval with a value of 1m or greater. +# interval = "1m" +# +# ## Maximum number of API calls to make per second. The quota for accounts +# ## varies, it can be viewed on the API dashboard: +# ## https://cloud.google.com/monitoring/quotas#quotas_and_limits +# # rate_limit = 14 +# +# ## The delay and window options control the number of points selected on +# ## each gather. When set, metrics are gathered between: +# ## start: now() - delay - window +# ## end: now() - delay +# # +# ## Collection delay; if set too low metrics may not yet be available. +# # delay = "5m" +# # +# ## If unset, the window will start at 1m and be updated dynamically to span +# ## the time between calls (approximately the length of the plugin interval). +# # window = "1m" +# +# ## TTL for cached list of metric types. This is the maximum amount of time +# ## it may take to discover new metrics. +# # cache_ttl = "1h" +# +# ## If true, raw bucket counts are collected for distribution value types. +# ## For a more lightweight collection, you may wish to disable and use +# ## distribution_aggregation_aligners instead. +# # gather_raw_distribution_buckets = true +# +# ## Aggregate functions to be used for metrics whose value type is +# ## distribution. These aggregate values are recorded in in addition to raw +# ## bucket counts; if they are enabled. +# ## +# ## For a list of aligner strings see: +# ## https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#aligner +# # distribution_aggregation_aligners = [ +# # "ALIGN_PERCENTILE_99", +# # "ALIGN_PERCENTILE_95", +# # "ALIGN_PERCENTILE_50", +# # ] +# +# ## Filters can be added to reduce the number of time series matched. All +# ## functions are supported: starts_with, ends_with, has_substring, and +# ## one_of. Only the '=' operator is supported. 
+# ## +# ## The logical operators when combining filters are defined statically using +# ## the following values: +# ## filter ::= {AND } +# ## resource_labels ::= {OR } +# ## metric_labels ::= {OR } +# ## +# ## For more details, see https://cloud.google.com/monitoring/api/v3/filters +# # +# ## Resource labels refine the time series selection with the following expression: +# ## resource.labels. = +# # [[inputs.stackdriver.filter.resource_labels]] +# # key = "instance_name" +# # value = 'starts_with("localhost")' +# # +# ## Metric labels refine the time series selection with the following expression: +# ## metric.labels. = +# # [[inputs.stackdriver.filter.metric_labels]] +# # key = "device_name" +# # value = 'one_of("sda", "sdb")' + + +# # Get synproxy counter statistics from procfs +# [[inputs.synproxy]] +# # no configuration + + +# # Reads metrics from a Teamspeak 3 Server via ServerQuery +# [[inputs.teamspeak]] +# ## Server address for Teamspeak 3 ServerQuery +# # server = "127.0.0.1:10011" +# ## Username for ServerQuery +# username = "serverqueryuser" +# ## Password for ServerQuery +# password = "secret" +# ## Array of virtual servers +# # virtual_servers = [1] + + +# # Read metrics about temperature +# [[inputs.temp]] +# # no configuration + + +# # Read Tengine's basic status information (ngx_http_reqstat_module) +# [[inputs.tengine]] +# # An array of Tengine reqstat module URI to gather stats. +# urls = ["http://127.0.0.1/us"] +# +# # HTTP response timeout (default: 5s) +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.cer" +# # tls_key = "/etc/telegraf/key.key" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Gather metrics from the Tomcat server status page. 
+# [[inputs.tomcat]] +# ## URL of the Tomcat server status +# # url = "http://127.0.0.1:8080/manager/status/all?XML=true" +# +# ## HTTP Basic Auth Credentials +# # username = "tomcat" +# # password = "s3cret" +# +# ## Request timeout +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Inserts sine and cosine waves for demonstration purposes +# [[inputs.trig]] +# ## Set the amplitude +# amplitude = 10.0 + + +# # Read Twemproxy stats data +# [[inputs.twemproxy]] +# ## Twemproxy stats address and port (no scheme) +# addr = "localhost:22222" +# ## Monitor pool name +# pools = ["redis_pool", "mc_pool"] + + +# # A plugin to collect stats from the Unbound DNS resolver +# [[inputs.unbound]] +# ## Address of server to connect to, read from unbound conf default, optionally ':port' +# ## Will lookup IP if given a hostname +# server = "127.0.0.1:8953" +# +# ## If running as a restricted user you can prepend sudo for additional access: +# # use_sudo = false +# +# ## The default location of the unbound-control binary can be overridden with: +# # binary = "/usr/sbin/unbound-control" +# +# ## The default location of the unbound config file can be overridden with: +# # config_file = "/etc/unbound/unbound.conf" +# +# ## The default timeout of 1s can be overridden with: +# # timeout = "1s" +# +# ## When set to true, thread metrics are tagged with the thread id. +# ## +# ## The default is false for backwards compatibility, and will be changed to +# ## true in a future version. It is recommended to set to true on new +# ## deployments. +# thread_as_tag = false + + +# # Read uWSGI metrics. +# [[inputs.uwsgi]] +# ## List with urls of uWSGI Stats servers. 
URL must match pattern: +# ## scheme://address[:port] +# ## +# ## For example: +# ## servers = ["tcp://localhost:5050", "http://localhost:1717", "unix:///tmp/statsock"] +# servers = ["tcp://127.0.0.1:1717"] +# +# ## General connection timeout +# # timeout = "5s" + + +# # Input plugin to collect Windows Event Log messages +# [[inputs.win_eventlog]] +# ## Telegraf should have Administrator permissions to subscribe for some Windows Events channels +# ## (System log, for example) +# +# ## LCID (Locale ID) for event rendering +# ## 1033 to force English language +# ## 0 to use default Windows locale +# # locale = 0 +# +# ## Name of eventlog, used only if xpath_query is empty +# ## Example: "Application" +# # eventlog_name = "" +# +# ## xpath_query can be in defined short form like "Event/System[EventID=999]" +# ## or you can form a XML Query. Refer to the Consuming Events article: +# ## https://docs.microsoft.com/en-us/windows/win32/wes/consuming-events +# ## XML query is the recommended form, because it is most flexible +# ## You can create or debug XML Query by creating Custom View in Windows Event Viewer +# ## and then copying resulting XML here +# xpath_query = ''' +# +# +# +# *[System[( (EventID >= 5152 and EventID <= 5158) or EventID=5379 or EventID=4672)]] +# +# +# +# +# +# +# +# +# +# +# +# +# +# +# ''' +# +# ## System field names: +# ## "Source", "EventID", "Version", "Level", "Task", "Opcode", "Keywords", "TimeCreated", +# ## "EventRecordID", "ActivityID", "RelatedActivityID", "ProcessID", "ThreadID", "ProcessName", +# ## "Channel", "Computer", "UserID", "UserName", "Message", "LevelText", "TaskText", "OpcodeText" +# +# ## In addition to System, Data fields can be unrolled from additional XML nodes in event. 
+# ## Human-readable representation of those nodes is formatted into event Message field, +# ## but XML is more machine-parsable +# +# # Process UserData XML to fields, if this node exists in Event XML +# process_userdata = true +# +# # Process EventData XML to fields, if this node exists in Event XML +# process_eventdata = true +# +# ## Separator character to use for unrolled XML Data field names +# separator = "_" +# +# ## Get only first line of Message field. For most events first line is usually more than enough +# only_first_line_of_message = true +# +# ## Parse timestamp from TimeCreated.SystemTime event field. +# ## Will default to current time of telegraf processing on parsing error or if set to false +# timestamp_from_event = true +# +# ## Fields to include as tags. Globbing supported ("Level*" for both "Level" and "LevelText") +# event_tags = ["Source", "EventID", "Level", "LevelText", "Task", "TaskText", "Opcode", "OpcodeText", "Keywords", "Channel", "Computer"] +# +# ## Default list of fields to send. All fields are sent by default. Globbing supported +# event_fields = ["*"] +# +# ## Fields to exclude. Also applied to data fields. Globbing supported +# exclude_fields = ["TimeCreated", "Binary", "Data_Address*"] +# +# ## Skip those tags or fields if their value is empty or equals to zero. Globbing supported +# exclude_empty = ["*ActivityID", "UserID"] + + +# # Input plugin to counterPath Performance Counters on Windows operating systems +# [[inputs.win_perf_counters]] +# ## By default this plugin returns basic CPU and Disk statistics. +# ## See the README file for more examples. +# ## Uncomment examples below or write your own as you see fit. If the system +# ## being polled for data does not have the Object at startup of the Telegraf +# ## agent, it will not be gathered. 
+# ## Settings: +# # PrintValid = false # Print All matching performance counters +# # Whether request a timestamp along with the PerfCounter data or just use current time +# # UsePerfCounterTime=true +# # If UseWildcardsExpansion params is set to true, wildcards (partial wildcards in instance names and wildcards in counters names) in configured counter paths will be expanded +# # and in case of localized Windows, counter paths will be also localized. It also returns instance indexes in instance names. +# # If false, wildcards (not partial) in instance names will still be expanded, but instance indexes will not be returned in instance names. +# #UseWildcardsExpansion = false +# # Period after which counters will be reread from configuration and wildcards in counter paths expanded +# CountersRefreshInterval="1m" +# +# [[inputs.win_perf_counters.object]] +# # Processor usage, alternative to native, reports on a per core. +# ObjectName = "Processor" +# Instances = ["*"] +# Counters = [ +# "% Idle Time", +# "% Interrupt Time", +# "% Privileged Time", +# "% User Time", +# "% Processor Time", +# "% DPC Time", +# ] +# Measurement = "win_cpu" +# # Set to true to include _Total instance when querying for all (*). +# # IncludeTotal=false +# # Print out when the performance counter is missing from object, counter or instance. 
+# # WarnOnMissing = false +# +# [[inputs.win_perf_counters.object]] +# # Disk times and queues +# ObjectName = "LogicalDisk" +# Instances = ["*"] +# Counters = [ +# "% Idle Time", +# "% Disk Time", +# "% Disk Read Time", +# "% Disk Write Time", +# "% User Time", +# "% Free Space", +# "Current Disk Queue Length", +# "Free Megabytes", +# ] +# Measurement = "win_disk" +# +# [[inputs.win_perf_counters.object]] +# ObjectName = "PhysicalDisk" +# Instances = ["*"] +# Counters = [ +# "Disk Read Bytes/sec", +# "Disk Write Bytes/sec", +# "Current Disk Queue Length", +# "Disk Reads/sec", +# "Disk Writes/sec", +# "% Disk Time", +# "% Disk Read Time", +# "% Disk Write Time", +# ] +# Measurement = "win_diskio" +# +# [[inputs.win_perf_counters.object]] +# ObjectName = "Network Interface" +# Instances = ["*"] +# Counters = [ +# "Bytes Received/sec", +# "Bytes Sent/sec", +# "Packets Received/sec", +# "Packets Sent/sec", +# "Packets Received Discarded", +# "Packets Outbound Discarded", +# "Packets Received Errors", +# "Packets Outbound Errors", +# ] +# Measurement = "win_net" +# +# +# [[inputs.win_perf_counters.object]] +# ObjectName = "System" +# Counters = [ +# "Context Switches/sec", +# "System Calls/sec", +# "Processor Queue Length", +# "System Up Time", +# ] +# Instances = ["------"] +# Measurement = "win_system" +# +# [[inputs.win_perf_counters.object]] +# # Example counterPath where the Instance portion must be removed to get data back, +# # such as from the Memory object. +# ObjectName = "Memory" +# Counters = [ +# "Available Bytes", +# "Cache Faults/sec", +# "Demand Zero Faults/sec", +# "Page Faults/sec", +# "Pages/sec", +# "Transition Faults/sec", +# "Pool Nonpaged Bytes", +# "Pool Paged Bytes", +# "Standby Cache Reserve Bytes", +# "Standby Cache Normal Priority Bytes", +# "Standby Cache Core Bytes", +# ] +# Instances = ["------"] # Use 6 x - to remove the Instance bit from the counterPath. 
+# Measurement = "win_mem" +# +# [[inputs.win_perf_counters.object]] +# # Example query where the Instance portion must be removed to get data back, +# # such as from the Paging File object. +# ObjectName = "Paging File" +# Counters = [ +# "% Usage", +# ] +# Instances = ["_Total"] +# Measurement = "win_swap" + + +# # Input plugin to report Windows services info. +# [[inputs.win_services]] +# ## Names of the services to monitor. Leave empty to monitor all the available services on the host. Globs accepted. +# service_names = [ +# "LanmanServer", +# "TermService", +# "Win*", +# ] + + +# # Collect Wireguard server interface and peer statistics +# [[inputs.wireguard]] +# ## Optional list of Wireguard device/interface names to query. +# ## If omitted, all Wireguard interfaces are queried. +# # devices = ["wg0"] + + +# # Monitor wifi signal strength and quality +# [[inputs.wireless]] +# ## Sets 'proc' directory path +# ## If not specified, then default is /proc +# # host_proc = "/proc" + + +# # Reads metrics from a SSL certificate +# [[inputs.x509_cert]] +# ## List certificate sources +# ## Prefix your entry with 'file://' if you intend to use relative paths +# sources = ["tcp://example.org:443", "https://influxdata.com:443", +# "udp://127.0.0.1:4433", "/etc/ssl/certs/ssl-cert-snakeoil.pem", +# "/etc/mycerts/*.mydomain.org.pem", "file:///path/to/*.pem"] +# +# ## Timeout for SSL connection +# # timeout = "5s" +# +# ## Pass a different name into the TLS request (Server Name Indication) +# ## example: server_name = "myhost.example.org" +# # server_name = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" + + +# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, pools and datasets +# [[inputs.zfs]] +# ## ZFS kstat path. 
Ignored on FreeBSD +# ## If not specified, then default is: +# # kstatPath = "/proc/spl/kstat/zfs" +# +# ## By default, telegraf gather all zfs stats +# ## If not specified, then default is: +# # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"] +# ## For Linux, the default is: +# # kstatMetrics = ["abdstats", "arcstats", "dnodestats", "dbufcachestats", +# # "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"] +# ## By default, don't gather zpool stats +# # poolMetrics = false +# ## By default, don't gather zdataset stats +# # datasetMetrics = false + + +# # Reads 'mntr' stats from one or many zookeeper servers +# [[inputs.zookeeper]] +# ## An array of address to gather stats about. Specify an ip or hostname +# ## with port. ie localhost:2181, 10.0.0.1:2181, etc. +# +# ## If no servers are specified, then localhost is used as the host. +# ## If no port is specified, 2181 is used +# servers = [":2181"] +# +# ## Timeout for metric collections from all servers. Minimum timeout is "1s". +# # timeout = "5s" +# +# ## Optional TLS Config +# # enable_tls = true +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## If false, skip chain & host verification +# # insecure_skip_verify = true + + +############################################################################### +# SERVICE INPUT PLUGINS # +############################################################################### + + +# # Listener capable of handling KNX bus messages provided through a KNX-IP Interface. +# [[inputs.KNXListener]] +# ## Type of KNX-IP interface. +# ## Can be either "tunnel" or "router". +# # service_type = "tunnel" +# +# ## Address of the KNX-IP interface. 
+# service_address = "localhost:3671" +# +# ## Measurement definition(s) +# # [[inputs.knx_listener.measurement]] +# # ## Name of the measurement +# # name = "temperature" +# # ## Datapoint-Type (DPT) of the KNX messages +# # dpt = "9.001" +# # ## List of Group-Addresses (GAs) assigned to the measurement +# # addresses = ["5/5/1"] +# +# # [[inputs.knx_listener.measurement]] +# # name = "illumination" +# # dpt = "9.004" +# # addresses = ["5/5/3"] + + +# # Pull Metric Statistics from Aliyun CMS +# [[inputs.aliyuncms]] +# ## Aliyun Credentials +# ## Credentials are loaded in the following order +# ## 1) Ram RoleArn credential +# ## 2) AccessKey STS token credential +# ## 3) AccessKey credential +# ## 4) Ecs Ram Role credential +# ## 5) RSA keypair credential +# ## 6) Environment variables credential +# ## 7) Instance metadata credential +# +# # access_key_id = "" +# # access_key_secret = "" +# # access_key_sts_token = "" +# # role_arn = "" +# # role_session_name = "" +# # private_key = "" +# # public_key_id = "" +# # role_name = "" +# +# ## Specify the ali cloud region list to be queried for metrics and objects discovery +# ## If not set, all supported regions (see below) would be covered, it can provide a significant load on API, so the recommendation here +# ## is to limit the list as much as possible. Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm +# ## Default supported regions are: +# ## 21 items: cn-qingdao,cn-beijing,cn-zhangjiakou,cn-huhehaote,cn-hangzhou,cn-shanghai,cn-shenzhen, +# ## cn-heyuan,cn-chengdu,cn-hongkong,ap-southeast-1,ap-southeast-2,ap-southeast-3,ap-southeast-5, +# ## ap-south-1,ap-northeast-1,us-west-1,us-east-1,eu-central-1,eu-west-1,me-east-1 +# ## +# ## From discovery perspective it set the scope for object discovery, the discovered info can be used to enrich +# ## the metrics with objects attributes/tags. 
+# ## Discovery is not supported for all projects (if not supported, then
+# ## it will be reported on the start - for example for 'acs_cdn' project:
+# ## 'E! [inputs.aliyuncms] Discovery tool is not activated: no discovery support for project "acs_cdn"' )
+# ## Currently, discovery is supported for the following projects:
+# ## - acs_ecs_dashboard
+# ## - acs_rds_dashboard
+# ## - acs_slb_dashboard
+# ## - acs_vpc_eip
+# regions = ["cn-hongkong"]
+#
+# # The minimum period for AliyunCMS metrics is 1 minute (60s). However not all
+# # metrics are made available to the 1 minute period. Some are collected at
+# # 3 minute, 5 minute, or larger intervals.
+# # See: https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.2.18.2bc1750eeOw1Pv
+# # Note that if a period is configured that is smaller than the minimum for a
+# # particular metric, that metric will not be returned by the Aliyun OpenAPI
+# # and will not be collected by Telegraf.
+# #
+# ## Requested AliyunCMS aggregation Period (required - must be a multiple of 60s)
+# period = "5m"
+#
+# ## Collection Delay (required - must account for metrics availability via AliyunCMS API)
+# delay = "1m"
+#
+# ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
+# ## gaps or overlap in pulled data
+# interval = "5m"
+#
+# ## Metric Statistic Project (required)
+# project = "acs_slb_dashboard"
+#
+# ## Maximum requests per second, default value is 200
+# ratelimit = 200
+#
+# ## How often the discovery API call is executed (default 1m)
+# #discovery_interval = "1m"
+#
+# ## Metrics to Pull (Required)
+# [[inputs.aliyuncms.metrics]]
+# ## Metrics names to be requested,
+# ## described here (per project): https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
+# names = ["InstanceActiveConnection", "InstanceNewConnection"]
+#
+# ## Dimension filters for Metric (these are optional).
+# ## This allows getting an additional metric dimension. If a dimension is not specified it can be returned or
+# ## the data can be aggregated - it depends on the particular metric, you can find details here: https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
+# ##
+# ## Note, that by default dimension filter includes the list of discovered objects in scope (if discovery is enabled)
+# ## Values specified here would be added into the list of discovered objects.
+# ## You can specify either single dimension:
+# #dimensions = '{"instanceId": "p-example"}'
+#
+# ## Or you can specify several dimensions at once:
+# #dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]'
+#
+# ## Enrichment tags, can be added from discovery (if supported)
+# ## Notation is :
+# ## To figure out which fields are available, consult the Describe API per project.
+# ## For example, for SLB: https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers&params={}&tab=MOCK&lang=GO
+# #tag_query_path = [
+# # "address:Address",
+# # "name:LoadBalancerName",
+# # "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]"
+# # ]
+# ## The following tags are added by default: regionId (if discovery enabled), userId, instanceId.
+#
+# ## Allow metrics without discovery data, if discovery is enabled. If set to true, then metric without discovery
+# ## data would be emitted, otherwise dropped. This can be of help when debugging dimension filters, or partial coverage
+# ## of discovery scope vs monitoring scope
+# #allow_dps_without_discovery = false
+
+
+# # AMQP consumer plugin
+# [[inputs.amqp_consumer]]
+# ## Broker to consume from.
+# ## deprecated in 1.7; use the brokers option
+# # url = "amqp://localhost:5672/influxdb"
+#
+# ## Brokers to consume from. If multiple brokers are specified a random broker
+# ## will be selected anytime a connection is established. This can be
+# ## helpful for load balancing when not using a dedicated load balancer.
+# brokers = ["amqp://localhost:5672/influxdb"] +# +# ## Authentication credentials for the PLAIN auth_method. +# # username = "" +# # password = "" +# +# ## Name of the exchange to declare. If unset, no exchange will be declared. +# exchange = "telegraf" +# +# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash". +# # exchange_type = "topic" +# +# ## If true, exchange will be passively declared. +# # exchange_passive = false +# +# ## Exchange durability can be either "transient" or "durable". +# # exchange_durability = "durable" +# +# ## Additional exchange arguments. +# # exchange_arguments = { } +# # exchange_arguments = {"hash_property" = "timestamp"} +# +# ## AMQP queue name. +# queue = "telegraf" +# +# ## AMQP queue durability can be "transient" or "durable". +# queue_durability = "durable" +# +# ## If true, queue will be passively declared. +# # queue_passive = false +# +# ## A binding between the exchange and queue using this binding key is +# ## created. If unset, no binding is created. +# binding_key = "#" +# +# ## Maximum number of messages server should give to the worker. +# # prefetch_count = 50 +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Auth method. 
+# ## PLAIN and EXTERNAL are supported
+# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
+# ## described here: https://www.rabbitmq.com/plugins.html
+# # auth_method = "PLAIN"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Content encoding for message payloads, can be set to "gzip" or
+# ## "identity" to apply no encoding.
+# # content_encoding = "identity"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Read Cassandra metrics through Jolokia
+# [[inputs.cassandra]]
+# ## DEPRECATED: The cassandra plugin has been deprecated. Please use the
+# ## jolokia2 plugin instead.
+# ##
+# ## see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
+#
+# context = "/jolokia/read"
+# ## List of cassandra servers exposing jolokia read service
+# servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
+# ## List of metrics collected on above servers
+# ## Each metric consists of a jmx path.
+# ## This will collect all heap memory usage metrics from the jvm and
+# ## ReadLatency metrics for all keyspaces and tables.
+# ## "type=Table" in the query works with Cassandra3.0. Older versions might
+# ## need to use "type=ColumnFamily"
+# metrics = [
+# "/java.lang:type=Memory/HeapMemoryUsage",
+# "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency"
+# ]
+
+
+# # Cisco model-driven telemetry (MDT) input plugin for IOS XR, IOS XE and NX-OS platforms
+# [[inputs.cisco_telemetry_mdt]]
+# ## Telemetry transport can be "tcp" or "grpc". TLS is only supported when
+# ## using the grpc transport.
+# transport = "grpc" +# +# ## Address and port to host telemetry listener +# service_address = ":57000" +# +# ## Enable TLS; grpc transport only. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Enable TLS client authentication and define allowed CA certificates; grpc +# ## transport only. +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Define (for certain nested telemetry measurements with embedded tags) which fields are tags +# # embedded_tags = ["Cisco-IOS-XR-qos-ma-oper:qos/interface-table/interface/input/service-policy-names/service-policy-instance/statistics/class-stats/class-name"] +# +# ## Define aliases to map telemetry encoding paths to simple measurement names +# [inputs.cisco_telemetry_mdt.aliases] +# ifstats = "ietf-interfaces:interfaces-state/interface/statistics" +# ##Define Property Xformation, please refer README and https://pubhub.devnetcloud.com/media/dme-docs-9-3-3/docs/appendix/ for Model details. +# [inputs.cisco_telemetry_mdt.dmes] +# ModTs = "ignore" +# CreateTs = "ignore" + + +# # Read metrics from one or many ClickHouse servers +# [[inputs.clickhouse]] +# ## Username for authorization on ClickHouse server +# ## example: username = "default" +# username = "default" +# +# ## Password for authorization on ClickHouse server +# ## example: password = "super_secret" +# +# ## HTTP(s) timeout while getting metrics values +# ## The timeout includes connection time, any redirects, and reading the response body. 
+# ## example: timeout = 1s
+# # timeout = 5s
+#
+# ## List of servers for metrics scraping
+# ## metrics scrape via HTTP(s) clickhouse interface
+# ## https://clickhouse.tech/docs/en/interfaces/http/
+# ## example: servers = ["http://127.0.0.1:8123","https://custom-server.mdb.yandexcloud.net"]
+# servers = ["http://127.0.0.1:8123"]
+#
+# ## If "auto_discovery" is "true" the plugin tries to connect to all servers available in the cluster
+# ## using the same "user:password" described in the "user" and "password" parameters
+# ## and gets this server hostname list from the "system.clusters" table
+# ## see
+# ## - https://clickhouse.tech/docs/en/operations/system_tables/#system-clusters
+# ## - https://clickhouse.tech/docs/en/operations/server_settings/settings/#server_settings_remote_servers
+# ## - https://clickhouse.tech/docs/en/operations/table_engines/distributed/
+# ## - https://clickhouse.tech/docs/en/operations/table_engines/replication/#creating-replicated-tables
+# ## example: auto_discovery = false
+# # auto_discovery = true
+#
+# ## Filter cluster names in "system.clusters" when "auto_discovery" is "true"
+# ## when this filter is present then the "WHERE cluster IN (...)" filter will apply
+# ## please use only full cluster names here, regexp and glob filters are not allowed
+# ## for "/etc/clickhouse-server/config.d/remote.xml"
+# ##
+# ##
+# ##
+# ##
+# ## clickhouse-ru-1.local9000
+# ## clickhouse-ru-2.local9000
+# ##
+# ##
+# ## clickhouse-eu-1.local9000
+# ## clickhouse-eu-2.local9000
+# ##
+# ##
+# ##
+# ##
+# ##
+# ##
+# ## example: cluster_include = ["my-own-cluster"]
+# # cluster_include = []
+#
+# ## Filter cluster names in "system.clusters" when "auto_discovery" is "true"
+# ## when this filter is present then the "WHERE cluster NOT IN (...)" filter will apply
+# ## example: cluster_exclude = ["my-internal-not-discovered-cluster"]
+# # cluster_exclude = []
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # 
tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from Google PubSub +# [[inputs.cloud_pubsub]] +# ## Required. Name of Google Cloud Platform (GCP) Project that owns +# ## the given PubSub subscription. +# project = "my-project" +# +# ## Required. Name of PubSub subscription to ingest metrics from. +# subscription = "my-subscription" +# +# ## Required. Data format to consume. +# ## Each data format has its own unique set of configuration options. +# ## Read more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Optional. Filepath for GCP credentials JSON file to authorize calls to +# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use +# ## Application Default Credentials, which is preferred. +# # credentials_file = "path/to/my/creds.json" +# +# ## Optional. Number of seconds to wait before attempting to restart the +# ## PubSub subscription receiver after an unexpected error. +# ## If the streaming pull for a PubSub Subscription fails (receiver), +# ## the agent attempts to restart receiving messages after this many seconds. +# # retry_delay_seconds = 5 +# +# ## Optional. Maximum byte length of a message to consume. +# ## Larger messages are dropped with an error. If less than 0 or unspecified, +# ## treated as no limit. +# # max_message_len = 1000000 +# +# ## Optional. Maximum messages to read from PubSub that have not been written +# ## to an output. Defaults to 1000. +# ## For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message contains 10 metrics and the output +# ## metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. 
+# # max_undelivered_messages = 1000 +# +# ## The following are optional Subscription ReceiveSettings in PubSub. +# ## Read more about these values: +# ## https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings +# +# ## Optional. Maximum number of seconds for which a PubSub subscription +# ## should auto-extend the PubSub ACK deadline for each message. If less than +# ## 0, auto-extension is disabled. +# # max_extension = 0 +# +# ## Optional. Maximum number of unprocessed messages in PubSub +# ## (unacknowledged but not yet expired in PubSub). +# ## A value of 0 is treated as the default PubSub value. +# ## Negative values will be treated as unlimited. +# # max_outstanding_messages = 0 +# +# ## Optional. Maximum size in bytes of unprocessed messages in PubSub +# ## (unacknowledged but not yet expired in PubSub). +# ## A value of 0 is treated as the default PubSub value. +# ## Negative values will be treated as unlimited. +# # max_outstanding_bytes = 0 +# +# ## Optional. Max number of goroutines a PubSub Subscription receiver can spawn +# ## to pull messages from PubSub concurrently. This limit applies to each +# ## subscription separately and is treated as the PubSub default if less than +# ## 1. Note this setting does not limit the number of messages that can be +# ## processed concurrently (use "max_outstanding_messages" instead). +# # max_receiver_go_routines = 0 +# +# ## Optional. If true, Telegraf will attempt to base64 decode the +# ## PubSub message data before parsing +# # base64_data = false + + +# # Google Cloud Pub/Sub Push HTTP listener +# [[inputs.cloud_pubsub_push]] +# ## Address and port to host HTTP listener on +# service_address = ":8080" +# +# ## Application secret to verify messages originate from Cloud Pub/Sub +# # token = "" +# +# ## Path to listen to. +# # path = "/" +# +# ## Maximum duration before timing out read of the request +# # read_timeout = "10s" +# ## Maximum duration before timing out write of the response. 
+# ## This should be set to a value
+# ## large enough that you can send at least 'metric_batch_size' number of messages within the
+# ## duration.
+# # write_timeout = "10s"
+#
+# ## Maximum allowed http request body size in bytes.
+# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
+# # max_body_size = "500MB"
+#
+# ## Whether to add the pubsub metadata, such as message attributes and subscription as a tag.
+# # add_meta = false
+#
+# ## Optional. Maximum messages to read from PubSub that have not been written
+# ## to an output. Defaults to 1000.
+# ## For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message contains 10 metrics and the output
+# ## metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## Set one or more allowed client CA certificate file names to
+# ## enable mutually authenticated TLS connections
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Add service certificate and key
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Ingests files in a directory and then moves them to a target directory.
+# [[inputs.directory_monitor]]
+# ## The directory to monitor and read files from.
+# directory = ""
+# #
+# ## The directory to move finished files to.
+# finished_directory = ""
+# #
+# ## The directory to move files to upon file error.
+# ## If not provided, erroring files will stay in the monitored directory.
+# # error_directory = "" +# # +# ## The amount of time a file is allowed to sit in the directory before it is picked up. +# ## This time can generally be low but if you choose to have a very large file written to the directory and it's potentially slow, +# ## set this higher so that the plugin will wait until the file is fully copied to the directory. +# # directory_duration_threshold = "50ms" +# # +# ## A list of the only file names to monitor, if necessary. Supports regex. If left blank, all files are ingested. +# # files_to_monitor = ["^.*\.csv"] +# # +# ## A list of files to ignore, if necessary. Supports regex. +# # files_to_ignore = [".DS_Store"] +# # +# ## Maximum lines of the file to process that have not yet be written by the +# ## output. For best throughput set to the size of the output's metric_buffer_limit. +# ## Warning: setting this number higher than the output's metric_buffer_limit can cause dropped metrics. +# # max_buffered_metrics = 10000 +# # +# ## The maximum amount of file paths to queue up for processing at once, before waiting until files are processed to find more files. +# ## Lowering this value will result in *slightly* less memory use, with a potential sacrifice in speed efficiency, if absolutely necessary. +# # file_queue_size = 100000 +# # +# ## The dataformat to be read from the files. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# ## NOTE: We currently only support parsing newline-delimited JSON. 
See the format here: https://github.com/ndjson/ndjson-spec +# data_format = "influx" + + +# # Read logging output from the Docker engine +# [[inputs.docker_log]] +# ## Docker Endpoint +# ## To use TCP, set endpoint = "tcp://[ip]:[port]" +# ## To use environment variables (ie, docker-machine), set endpoint = "ENV" +# # endpoint = "unix:///var/run/docker.sock" +# +# ## When true, container logs are read from the beginning; otherwise +# ## reading begins at the end of the log. +# # from_beginning = false +# +# ## Timeout for Docker API calls. +# # timeout = "5s" +# +# ## Containers to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all containers +# # container_name_include = [] +# # container_name_exclude = [] +# +# ## Container states to include and exclude. Globs accepted. +# ## When empty only containers in the "running" state will be captured. +# # container_state_include = [] +# # container_state_exclude = [] +# +# ## docker labels to include and exclude as tags. Globs accepted. +# ## Note that an empty array for both will include all labels as tags +# # docker_label_include = [] +# # docker_label_exclude = [] +# +# ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars +# source_tag = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Azure Event Hubs service input plugin +# [[inputs.eventhub_consumer]] +# ## The default behavior is to create a new Event Hub client from environment variables. 
+# ## This requires one of the following sets of environment variables to be set: +# ## +# ## 1) Expected Environment Variables: +# ## - "EVENTHUB_CONNECTION_STRING" +# ## +# ## 2) Expected Environment Variables: +# ## - "EVENTHUB_NAMESPACE" +# ## - "EVENTHUB_NAME" +# ## - "EVENTHUB_KEY_NAME" +# ## - "EVENTHUB_KEY_VALUE" +# +# ## 3) Expected Environment Variables: +# ## - "EVENTHUB_NAMESPACE" +# ## - "EVENTHUB_NAME" +# ## - "AZURE_TENANT_ID" +# ## - "AZURE_CLIENT_ID" +# ## - "AZURE_CLIENT_SECRET" +# +# ## Uncommenting the option below will create an Event Hub client based solely on the connection string. +# ## This can either be the associated environment variable or hard coded directly. +# ## If this option is uncommented, environment variables will be ignored. +# ## Connection string should contain EventHubName (EntityPath) +# # connection_string = "" +# +# ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister +# # persistence_dir = "" +# +# ## Change the default consumer group +# # consumer_group = "" +# +# ## By default the event hub receives all messages present on the broker, alternative modes can be set below. +# ## The timestamp should be in https://github.com/toml-lang/toml#offset-date-time format (RFC 3339). +# ## The 3 options below only apply if no valid persister is read from memory or file (e.g. first run). +# # from_timestamp = +# # latest = true +# +# ## Set a custom prefetch count for the receiver(s) +# # prefetch_count = 1000 +# +# ## Add an epoch to the receiver(s) +# # epoch = 0 +# +# ## Change to set a custom user agent, "telegraf" is used by default +# # user_agent = "telegraf" +# +# ## To consume from a specific partition, set the partition_ids option. +# ## An empty array will result in receiving from all partitions. 
+# # partition_ids = ["0","1"] +# +# ## Max undelivered messages +# # max_undelivered_messages = 1000 +# +# ## Set either option below to true to use a system property as timestamp. +# ## You have the choice between EnqueuedTime and IoTHubEnqueuedTime. +# ## It is recommended to use this setting when the data itself has no timestamp. +# # enqueued_time_as_ts = true +# # iot_hub_enqueued_time_as_ts = true +# +# ## Tags or fields to create from keys present in the application property bag. +# ## These could for example be set by message enrichments in Azure IoT Hub. +# # application_property_tags = [] +# # application_property_fields = [] +# +# ## Tag or field name to use for metadata +# ## By default all metadata is disabled +# # sequence_number_field = "SequenceNumber" +# # enqueued_time_field = "EnqueuedTime" +# # offset_field = "Offset" +# # partition_id_tag = "PartitionID" +# # partition_key_tag = "PartitionKey" +# # iot_hub_device_connection_id_tag = "IoTHubDeviceConnectionID" +# # iot_hub_auth_generation_id_tag = "IoTHubAuthGenerationID" +# # iot_hub_connection_auth_method_tag = "IoTHubConnectionAuthMethod" +# # iot_hub_connection_module_id_tag = "IoTHubConnectionModuleID" +# # iot_hub_enqueued_time_field = "IoTHubEnqueuedTime" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Run executable as long-running input plugin +# [[inputs.execd]] +# ## Program to run as daemon +# command = ["telegraf-smartctl", "-d", "/dev/sda"] +# +# ## Define how the process is signaled on each collection interval. +# ## Valid values are: +# ## "none" : Do not signal anything. +# ## The process must output metrics by itself. +# ## "STDIN" : Send a newline on STDIN. +# ## "SIGHUP" : Send a HUP signal. Not available on Windows. +# ## "SIGUSR1" : Send a USR1 signal. 
Not available on Windows. +# ## "SIGUSR2" : Send a USR2 signal. Not available on Windows. +# signal = "none" +# +# ## Delay before the process is restarted after an unexpected termination +# restart_delay = "10s" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # gNMI telemetry input plugin +# [[inputs.gnmi]] +# ## Address and port of the gNMI GRPC server +# addresses = ["10.49.234.114:57777"] +# +# ## define credentials +# username = "cisco" +# password = "cisco" +# +# ## gNMI encoding requested (one of: "proto", "json", "json_ietf", "bytes") +# # encoding = "proto" +# +# ## redial in case of failures after +# redial = "10s" +# +# ## enable client-side TLS and define CA to authenticate the device +# # enable_tls = true +# # tls_ca = "/etc/telegraf/ca.pem" +# # insecure_skip_verify = true +# +# ## define client-side TLS certificate & key to authenticate to the device +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## gNMI subscription prefix (optional, can usually be left empty) +# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths +# # origin = "" +# # prefix = "" +# # target = "" +# +# ## Define additional aliases to map telemetry encoding paths to simple measurement names +# #[inputs.gnmi.aliases] +# # ifcounters = "openconfig:/interfaces/interface/state/counters" +# +# [[inputs.gnmi.subscription]] +# ## Name of the measurement that will be emitted +# name = "ifcounters" +# +# ## Origin and path of the subscription +# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths +# ## +# ## origin usually refers to a (YANG) data model implemented by the device +# ## and path to a specific substructure inside it that should be 
subscribed to (similar to an XPath) +# ## YANG models can be found e.g. here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr +# origin = "openconfig-interfaces" +# path = "/interfaces/interface/state/counters" +# +# # Subscription mode (one of: "target_defined", "sample", "on_change") and interval +# subscription_mode = "sample" +# sample_interval = "10s" +# +# ## Suppress redundant transmissions when measured values are unchanged +# # suppress_redundant = false +# +# ## If suppression is enabled, send updates at least every X seconds anyway +# # heartbeat_interval = "60s" + + +# # Accept metrics over InfluxDB 1.x HTTP API +# [[inputs.http_listener]] +# ## Address and port to host InfluxDB listener on +# service_address = ":8186" +# +# ## maximum duration before timing out read of the request +# read_timeout = "10s" +# ## maximum duration before timing out write of the response +# write_timeout = "10s" +# +# ## Maximum allowed HTTP request body size in bytes. +# ## 0 means to use the default of 32MiB. +# max_body_size = "32MiB" +# +# ## Optional tag name used to store the database. +# ## If the write has a database in the query string then it will be kept in this tag name. +# ## This tag can be used in downstream outputs. +# ## The default value of nothing means it will be off and the database will not be recorded. +# # database_tag = "" +# +# ## If set the retention policy specified in the write query will be added as +# ## the value of this tag name. +# # retention_policy_tag = "" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# tls_cert = "/etc/telegraf/cert.pem" +# tls_key = "/etc/telegraf/key.pem" +# +# ## Optional username and password to accept for HTTP basic authentication. +# ## You probably want to make sure you have TLS configured above for this. 
+# # basic_username = "foobar" +# # basic_password = "barfoo" + + +# # Generic HTTP write listener +# [[inputs.http_listener_v2]] +# ## Address and port to host HTTP listener on +# service_address = ":8080" +# +# ## Path to listen to. +# ## This option is deprecated and only available for backward-compatibility. Please use paths instead. +# # path = "" +# +# ## Paths to listen to. +# # paths = ["/telegraf"] +# +# ## Save path as http_listener_v2_path tag if set to true +# # path_tag = false +# +# ## HTTP methods to accept. +# # methods = ["POST", "PUT"] +# +# ## maximum duration before timing out read of the request +# # read_timeout = "10s" +# ## maximum duration before timing out write of the response +# # write_timeout = "10s" +# +# ## Maximum allowed http request body size in bytes. +# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) +# # max_body_size = "500MB" +# +# ## Part of the request to consume. Available options are "body" and +# ## "query". +# # data_source = "body" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Optional username and password to accept for HTTP basic authentication. +# ## You probably want to make sure you have TLS configured above for this. +# # basic_username = "foobar" +# # basic_password = "barfoo" +# +# ## Optional setting to map http headers into tags +# ## If the http header is not present on the request, no corresponding tag will be added +# ## If multiple instances of the http header are present, only the first value will be used +# # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} +# +# ## Data format to consume. 
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Accept metrics over InfluxDB 1.x HTTP API +# [[inputs.influxdb_listener]] +# ## Address and port to host InfluxDB listener on +# service_address = ":8186" +# +# ## maximum duration before timing out read of the request +# read_timeout = "10s" +# ## maximum duration before timing out write of the response +# write_timeout = "10s" +# +# ## Maximum allowed HTTP request body size in bytes. +# ## 0 means to use the default of 32MiB. +# max_body_size = "32MiB" +# +# ## Optional tag name used to store the database. +# ## If the write has a database in the query string then it will be kept in this tag name. +# ## This tag can be used in downstream outputs. +# ## The default value of nothing means it will be off and the database will not be recorded. +# # database_tag = "" +# +# ## If set the retention policy specified in the write query will be added as +# ## the value of this tag name. +# # retention_policy_tag = "" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# tls_cert = "/etc/telegraf/cert.pem" +# tls_key = "/etc/telegraf/key.pem" +# +# ## Optional username and password to accept for HTTP basic authentication. +# ## You probably want to make sure you have TLS configured above for this. +# # basic_username = "foobar" +# # basic_password = "barfoo" + + +# # Accept metrics over InfluxDB 2.x HTTP API +# [[inputs.influxdb_v2_listener]] +# ## Address and port to host InfluxDB listener on +# ## (Double check the port. Could be 9999 if using OSS Beta) +# service_address = ":8086" +# +# ## Maximum allowed HTTP request body size in bytes. +# ## 0 means to use the default of 32MiB. 
+# # max_body_size = "32MiB"
+#
+# ## Optional tag to determine the bucket.
+# ## If the write has a bucket in the query string then it will be kept in this tag name.
+# ## This tag can be used in downstream outputs.
+# ## The default value of nothing means it will be off and the database will not be recorded.
+# # bucket_tag = ""
+#
+# ## Set one or more allowed client CA certificate file names to
+# ## enable mutually authenticated TLS connections
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Add service certificate and key
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Optional token to accept for HTTP authentication.
+# ## You probably want to make sure you have TLS configured above for this.
+# # token = "some-long-shared-secret-token"
+
+
+# # Read JTI OpenConfig Telemetry from listed sensors
+# [[inputs.jti_openconfig_telemetry]]
+# ## List of device addresses to collect telemetry from
+# servers = ["localhost:1883"]
+#
+# ## Authentication details. Username and password are a must if the device expects
+# ## authentication. Client ID must be unique when connecting from multiple instances
+# ## of telegraf to the same device
+# username = "user"
+# password = "pass"
+# client_id = "telegraf"
+#
+# ## Frequency to get data
+# sample_frequency = "1000ms"
+#
+# ## Sensors to subscribe for
+# ## An identifier for each sensor can be provided in the path by separating with a space
+# ## Else the sensor path will be used as the identifier
+# ## When an identifier is used, we can provide a list of space separated sensors.
+# ## A single subscription will be created with all these sensors and data will
+# ## be saved to a measurement with this identifier name
+# sensors = [
+# "/interfaces/",
+# "collection /components/ /lldp",
+# ]
+#
+# ## We allow specifying sensor group level reporting rate. To do this, specify the
+# ## reporting rate in Duration at the beginning of sensor paths / collection
+# ## name. 
For entries without reporting rate, we use configured sample frequency +# sensors = [ +# "1000ms customReporting /interfaces /lldp", +# "2000ms collection /components", +# "/interfaces", +# ] +# +# ## Optional TLS Config +# # enable_tls = true +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms. +# ## Failed streams/calls will not be retried if 0 is provided +# retry_delay = "1000ms" +# +# ## To treat all string values as tags, set this to true +# str_as_tags = false + + +# # Read metrics from Kafka topics +# [[inputs.kafka_consumer]] +# ## Kafka brokers. +# brokers = ["localhost:9092"] +# +# ## Topics to consume. +# topics = ["telegraf"] +# +# ## When set this tag will be added to all metrics with the topic as the value. +# # topic_tag = "" +# +# ## Optional Client id +# # client_id = "Telegraf" +# +# ## Set the minimal supported Kafka version. Setting this enables the use of new +# ## Kafka features and APIs. Must be 0.10.2.0 or greater. +# ## ex: version = "1.1.0" +# # version = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## SASL authentication credentials. 
These settings should typically be used +# ## with TLS encryption enabled +# # sasl_username = "kafka" +# # sasl_password = "secret" +# +# ## Optional SASL: +# ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI +# ## (defaults to PLAIN) +# # sasl_mechanism = "" +# +# ## used if sasl_mechanism is GSSAPI (experimental) +# # sasl_gssapi_service_name = "" +# # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH +# # sasl_gssapi_auth_type = "KRB5_USER_AUTH" +# # sasl_gssapi_kerberos_config_path = "/" +# # sasl_gssapi_realm = "realm" +# # sasl_gssapi_key_tab_path = "" +# # sasl_gssapi_disable_pafxfast = false +# +# ## used if sasl_mechanism is OAUTHBEARER (experimental) +# # sasl_access_token = "" +# +# ## SASL protocol version. When connecting to Azure EventHub set to 0. +# # sasl_version = 1 +# +# ## Name of the consumer group. +# # consumer_group = "telegraf_metrics_consumers" +# +# ## Compression codec represents the various compression codecs recognized by +# ## Kafka in messages. +# ## 0 : None +# ## 1 : Gzip +# ## 2 : Snappy +# ## 3 : LZ4 +# ## 4 : ZSTD +# # compression_codec = 0 +# +# ## Initial offset position; one of "oldest" or "newest". +# # offset = "oldest" +# +# ## Consumer group partition assignment strategy; one of "range", "roundrobin" or "sticky". +# # balance_strategy = "range" +# +# ## Maximum length of a message to consume, in bytes (default 0/unlimited); +# ## larger messages are dropped +# max_message_len = 1000000 +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. 
+# # max_undelivered_messages = 1000 +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read metrics from Kafka topic(s) +# [[inputs.kafka_consumer_legacy]] +# ## topic(s) to consume +# topics = ["telegraf"] +# +# ## an array of Zookeeper connection strings +# zookeeper_peers = ["localhost:2181"] +# +# ## Zookeeper Chroot +# zookeeper_chroot = "" +# +# ## the name of the consumer group +# consumer_group = "telegraf_metrics_consumers" +# +# ## Offset (must be either "oldest" or "newest") +# offset = "oldest" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Maximum length of a message to consume, in bytes (default 0/unlimited); +# ## larger messages are dropped +# max_message_len = 65536 + + +# # Configuration for the AWS Kinesis input. +# [[inputs.kinesis_consumer]] +# ## Amazon REGION of kinesis endpoint. 
+# region = "ap-southeast-2" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# # access_key = "" +# # secret_key = "" +# # token = "" +# # role_arn = "" +# # web_identity_token_file = "" +# # role_session_name = "" +# # profile = "" +# # shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Kinesis StreamName must exist prior to starting telegraf. +# streamname = "StreamName" +# +# ## Shard iterator type (only 'TRIM_HORIZON' and 'LATEST' currently supported) +# # shard_iterator_type = "TRIM_HORIZON" +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Data format to consume. 
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## +# ## The content encoding of the data from kinesis +# ## If you are processing a cloudwatch logs kinesis stream then set this to "gzip" +# ## as AWS compresses cloudwatch log data before it is sent to kinesis (aws +# ## also base64 encodes the zip byte data before pushing to the stream. The base64 decoding +# ## is done automatically by the golang sdk, as data is read from kinesis) +# ## +# # content_encoding = "identity" +# +# ## Optional +# ## Configuration for a dynamodb checkpoint +# [inputs.kinesis_consumer.checkpoint_dynamodb] +# ## unique name for this consumer +# app_name = "default" +# table_name = "default" + + +# # Listener capable of handling KNX bus messages provided through a KNX-IP Interface. +# [[inputs.knx_listener]] +# ## Type of KNX-IP interface. +# ## Can be either "tunnel" or "router". +# # service_type = "tunnel" +# +# ## Address of the KNX-IP interface. +# service_address = "localhost:3671" +# +# ## Measurement definition(s) +# # [[inputs.knx_listener.measurement]] +# # ## Name of the measurement +# # name = "temperature" +# # ## Datapoint-Type (DPT) of the KNX messages +# # dpt = "9.001" +# # ## List of Group-Addresses (GAs) assigned to the measurement +# # addresses = ["5/5/1"] +# +# # [[inputs.knx_listener.measurement]] +# # name = "illumination" +# # dpt = "9.004" +# # addresses = ["5/5/3"] + + +# # Read metrics off Arista LANZ, via socket +# [[inputs.lanz]] +# ## URL to Arista LANZ endpoint +# servers = [ +# "tcp://127.0.0.1:50001" +# ] + + +# # Stream and parse log file(s). +# [[inputs.logparser]] +# ## Log files to parse. +# ## These accept standard unix glob matching rules, but with the addition of +# ## ** as a "super asterisk". 
ie:
+# ## /var/log/**.log -> recursively find all .log files in /var/log
+# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
+# ## /var/log/apache.log -> only tail the apache log file
+# files = ["/var/log/apache/access.log"]
+#
+# ## Read files that currently exist from the beginning. Files that are created
+# ## while telegraf is running (and that match the "files" globs) will always
+# ## be read from the beginning.
+# from_beginning = false
+#
+# ## Method used to watch for file updates. Can be either "inotify" or "poll".
+# # watch_method = "inotify"
+#
+# ## Parse logstash-style "grok" patterns:
+# [inputs.logparser.grok]
+# ## This is a list of patterns to check the given log file(s) for.
+# ## Note that adding patterns here increases processing time. The most
+# ## efficient configuration is to have one pattern per logparser.
+# ## Other common built-in patterns are:
+# ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs)
+# ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
+# patterns = ["%{COMBINED_LOG_FORMAT}"]
+#
+# ## Name of the outputted measurement.
+# measurement = "apache_access_log"
+#
+# ## Full path(s) to custom pattern files.
+# custom_pattern_files = []
+#
+# ## Custom patterns can also be defined here. Put one pattern per line.
+# custom_patterns = '''
+# '''
+#
+# ## Timezone allows you to provide an override for timestamps that
+# ## don't already include an offset
+# ## e.g. 04/06/2016 12:41:45 data one two 5.43µs
+# ##
+# ## Default: "" which renders UTC
+# ## Options are as follows:
+# ## 1. Local -- interpret based on machine localtime
+# ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
+# ## 3. UTC -- or blank/unspecified, will return timestamp in UTC
+# # timezone = "Canada/Eastern"
+#
+# ## When set to "disable", timestamp will not be incremented if there is a
+# ## duplicate.
+# # unique_timestamp = "auto"
+
+
+# # Read metrics from MQTT topic(s)
+# [[inputs.mqtt_consumer]]
+# ## Broker URLs for the MQTT server or cluster. To connect to multiple
+# ## clusters or standalone servers, use a separate plugin instance.
+# ## example: servers = ["tcp://localhost:1883"]
+# ## servers = ["ssl://localhost:1883"]
+# ## servers = ["ws://localhost:1883"]
+# servers = ["tcp://127.0.0.1:1883"]
+#
+# ## Topics that will be subscribed to.
+# topics = [
+# "telegraf/host01/cpu",
+# "telegraf/+/mem",
+# "sensors/#",
+# ]
+#
+# ## The message topic will be stored in a tag specified by this value. If set
+# ## to the empty string no topic tag will be created.
+# # topic_tag = "topic"
+#
+# ## QoS policy for messages
+# ## 0 = at most once
+# ## 1 = at least once
+# ## 2 = exactly once
+# ##
+# ## When using a QoS of 1 or 2, you should enable persistent_session to allow
+# ## resuming unacknowledged messages.
+# # qos = 0
+#
+# ## Connection timeout for initial connection in seconds
+# # connection_timeout = "30s"
+#
+# ## Maximum messages to read from the broker that have not been written by an
+# ## output. For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message from the queue contains 10 metrics and the
+# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## Persistent session disables clearing of the client session on connection.
+# ## In order for this option to work you must also set client_id to identify
+# ## the client. To receive messages that arrived while the client is offline,
+# ## also set the qos option to 1 or 2 and don't forget to also set the QoS when
+# ## publishing.
+# # persistent_session = false +# +# ## If unset, a random client ID will be generated. +# # client_id = "" +# +# ## Username and password to connect MQTT server. +# # username = "telegraf" +# # password = "metricsmetricsmetricsmetrics" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read metrics from NATS subject(s) +# [[inputs.nats_consumer]] +# ## urls of NATS servers +# servers = ["nats://localhost:4222"] +# +# ## subject(s) to consume +# subjects = ["telegraf"] +# +# ## name a queue group +# queue_group = "telegraf_consumers" +# +# ## Optional credentials +# # username = "" +# # password = "" +# +# ## Optional NATS 2.0 and NATS NGS compatible user credentials +# # credentials = "/etc/telegraf/nats.creds" +# +# ## Use Transport Layer Security +# # secure = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Sets the limits for pending msgs and bytes for each subscription +# ## These shouldn't need to be adjusted except in very high throughput scenarios +# # pending_message_limit = 65536 +# # pending_bytes_limit = 67108864 +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. 
+# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read NSQ topic for metrics. +# [[inputs.nsq_consumer]] +# ## Server option still works but is deprecated, we just prepend it to the nsqd array. +# # server = "localhost:4150" +# +# ## An array representing the NSQD TCP HTTP Endpoints +# nsqd = ["localhost:4150"] +# +# ## An array representing the NSQLookupd HTTP Endpoints +# nsqlookupd = ["localhost:4161"] +# topic = "telegraf" +# channel = "consumer" +# max_in_flight = 100 +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Data format to consume. 
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Receive OpenTelemetry traces, metrics, and logs over gRPC +# [[inputs.opentelemetry]] +# ## Override the default (0.0.0.0:4317) destination OpenTelemetry gRPC service +# ## address:port +# # service_address = "0.0.0.0:4317" +# +# ## Override the default (5s) new connection timeout +# # timeout = "5s" +# +# ## Override the default (prometheus-v1) metrics schema. +# ## Supports: "prometheus-v1", "prometheus-v2" +# ## For more information about the alternatives, read the Prometheus input +# ## plugin notes. +# # metrics_schema = "prometheus-v1" +# +# ## Optional TLS Config. +# ## For advanced options: https://github.com/influxdata/telegraf/blob/v1.18.3/docs/TLS.md +# ## +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections. +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# ## Add service certificate and key. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" + + +# # Read metrics from one or many pgbouncer servers +# [[inputs.pgbouncer]] +# ## specify address via a url matching: +# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## ?sslmode=[disable|verify-ca|verify-full] +# ## or a simple string: +# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production +# ## +# ## All connection parameters are optional. +# ## +# address = "host=localhost user=pgbouncer sslmode=disable" + + +# # Read metrics from one or many postgresql servers +# [[inputs.postgresql]] +# ## specify address via a url matching: +# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## ?sslmode=[disable|verify-ca|verify-full] +# ## or a simple string: +# ## host=localhost user=pqgotest password=... sslmode=... 
dbname=app_production +# ## +# ## All connection parameters are optional. +# ## +# ## Without the dbname parameter, the driver will default to a database +# ## with the same name as the user. This dbname is just for instantiating a +# ## connection with the server and doesn't restrict the databases we are trying +# ## to grab metrics for. +# ## +# address = "host=localhost user=postgres sslmode=disable" +# ## A custom name for the database that will be used as the "server" tag in the +# ## measurement output. If not specified, a default one generated from +# ## the connection address is used. +# # outputaddress = "db01" +# +# ## connection configuration. +# ## maxlifetime - specify the maximum lifetime of a connection. +# ## default is forever (0s) +# max_lifetime = "0s" +# +# ## A list of databases to explicitly ignore. If not specified, metrics for all +# ## databases are gathered. Do NOT use with the 'databases' option. +# # ignored_databases = ["postgres", "template0", "template1"] +# +# ## A list of databases to pull metrics about. If not specified, metrics for all +# ## databases are gathered. Do NOT use with the 'ignored_databases' option. +# # databases = ["app_production", "testing"] + + +# # Read metrics from one or many postgresql servers +# [[inputs.postgresql_extensible]] +# ## specify address via a url matching: +# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## ?sslmode=[disable|verify-ca|verify-full] +# ## or a simple string: +# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production +# # +# ## All connection parameters are optional. # +# ## Without the dbname parameter, the driver will default to a database +# ## with the same name as the user. This dbname is just for instantiating a +# ## connection with the server and doesn't restrict the databases we are trying +# ## to grab metrics for. +# # +# address = "host=localhost user=postgres sslmode=disable" +# +# ## connection configuration. 
+# ## maxlifetime - specify the maximum lifetime of a connection.
+# ## default is forever (0s)
+# max_lifetime = "0s"
+#
+# ## A list of databases to pull metrics about. If not specified, metrics for all
+# ## databases are gathered.
+# ## databases = ["app_production", "testing"]
+# #
+# ## A custom name for the database that will be used as the "server" tag in the
+# ## measurement output. If not specified, a default one generated from
+# ## the connection address is used.
+# # outputaddress = "db01"
+# #
+# ## Define the toml config where the sql queries are stored
+# ## New queries can be added, if the withdbname is set to true and there is no
+# ## databases defined in the 'databases field', the sql query is ended by a
+# ## 'is not null' in order to make the query succeed.
+# ## Example :
+# ## The sqlquery : "SELECT * FROM pg_stat_database where datname" becomes
+# ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
+# ## because the databases variable was set to ['postgres', 'pgbench' ] and the
+# ## withdbname was true. Be careful that if the withdbname is set to false you
+# ## don't have to define the where clause (aka with the dbname) the tagvalue
+# ## field is used to define custom tags (separated by commas)
+# ## The optional "measurement" value can be used to override the default
+# ## output measurement name ("postgresql").
+# ##
+# ## The script option can be used to specify the .sql file path.
+# ## If script and sqlquery options are specified at the same time, sqlquery will be used
+# ##
+# ## the tagvalue field is used to define custom tags (separated by commas).
+# ## the query is expected to return columns which match the names of the
+# ## defined tags. The values in these columns must be of a string-type,
+# ## a number-type or a blob-type.
+# ##
+# ## The timestamp field is used to override the data points timestamp value. By
+# ## default, all rows are inserted with current time.
By setting a timestamp column,
+# ## the row will be inserted with that column's value.
+# ##
+# ## Structure :
+# ## [[inputs.postgresql_extensible.query]]
+# ## sqlquery string
+# ## version string
+# ## withdbname boolean
+# ## tagvalue string (comma separated)
+# ## measurement string
+# ## timestamp string
+# [[inputs.postgresql_extensible.query]]
+# sqlquery="SELECT * FROM pg_stat_database"
+# version=901
+# withdbname=false
+# tagvalue=""
+# measurement=""
+# [[inputs.postgresql_extensible.query]]
+# sqlquery="SELECT * FROM pg_stat_bgwriter"
+# version=901
+# withdbname=false
+# tagvalue="postgresql.stats"
+
+
+# # Read metrics from one or many prometheus clients
+# [[inputs.prometheus]]
+# ## An array of urls to scrape metrics from.
+# urls = ["http://localhost:9100/metrics"]
+#
+# ## Metric version controls the mapping from Prometheus metrics into
+# ## Telegraf metrics. When using the prometheus_client output, use the same
+# ## value in both plugins to ensure metrics are round-tripped without
+# ## modification.
+# ##
+# ## example: metric_version = 1;
+# ## metric_version = 2; recommended version
+# # metric_version = 1
+#
+# ## Url tag name (tag containing scraped url. optional, default is "url")
+# # url_tag = "url"
+#
+# ## Whether the timestamp of the scraped metrics will be ignored.
+# ## If set to true, the gather time will be used.
+# # ignore_timestamp = false
+#
+# ## An array of Kubernetes services to scrape metrics from.
+# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
+#
+# ## Kubernetes config file to create client from.
+# # kube_config = "/path/to/kubernetes.config"
+#
+# ## Scrape Kubernetes pods for the following prometheus annotations:
+# ## - prometheus.io/scrape: Enable scraping for this pod
+# ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to
+# ## set this to 'https' & most likely set the tls config.
+# ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. +# ## - prometheus.io/port: If port is not 9102 use this annotation +# # monitor_kubernetes_pods = true +# ## Get the list of pods to scrape with either the scope of +# ## - cluster: the kubernetes watch api (default, no need to specify) +# ## - node: the local cadvisor api; for scalability. Note that the config node_ip or the environment variable NODE_IP must be set to the host IP. +# # pod_scrape_scope = "cluster" +# ## Only for node scrape scope: node IP of the node that telegraf is running on. +# ## Either this config or the environment variable NODE_IP must be set. +# # node_ip = "10.180.1.1" +# ## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping. +# ## Default is 60 seconds. +# # pod_scrape_interval = 60 +# ## Restricts Kubernetes monitoring to a single namespace +# ## ex: monitor_kubernetes_pods_namespace = "default" +# # monitor_kubernetes_pods_namespace = "" +# # label selector to target pods which have the label +# # kubernetes_label_selector = "env=dev,app=nginx" +# # field selector to target pods +# # eg. To scrape pods on a specific node +# # kubernetes_field_selector = "spec.nodeName=$HOSTNAME" +# +# ## Scrape Services available in Consul Catalog +# # [inputs.prometheus.consul] +# # enabled = true +# # agent = "http://localhost:8500" +# # query_interval = "5m" +# +# # [[inputs.prometheus.consul.query]] +# # name = "a service name" +# # tag = "a service tag" +# # url = 'http://{{if ne .ServiceAddress ""}}{{.ServiceAddress}}{{else}}{{.Address}}{{end}}:{{.ServicePort}}/{{with .ServiceMeta.metrics_path}}{{.}}{{else}}metrics{{end}}' +# # [inputs.prometheus.consul.query.tags] +# # host = "{{.Node}}" +# +# ## Use bearer token for authorization. 
('bearer_token' takes priority)
+# # bearer_token = "/path/to/bearer/token"
+# ## OR
+# # bearer_token_string = "abc_123"
+#
+# ## HTTP Basic Authentication username and password. ('bearer_token' and
+# ## 'bearer_token_string' take priority)
+# # username = ""
+# # password = ""
+#
+# ## Specify timeout duration for slower prometheus clients (default is 3s)
+# # response_timeout = "3s"
+#
+# ## Optional TLS Config
+# # tls_ca = /path/to/cafile
+# # tls_cert = /path/to/certfile
+# # tls_key = /path/to/keyfile
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Riemann protobuf listener.
+# [[inputs.riemann_listener]]
+# ## URL to listen on.
+# ## Default is "tcp://:5555"
+# # service_address = "tcp://:8094"
+# # service_address = "tcp://127.0.0.1:http"
+# # service_address = "tcp4://:8094"
+# # service_address = "tcp6://:8094"
+# # service_address = "tcp6://[2001:db8::1]:8094"
+#
+# ## Maximum number of concurrent connections.
+# ## 0 (default) is unlimited.
+# # max_connections = 1024
+# ## Read timeout.
+# ## 0 (default) is unlimited.
+# # read_timeout = "30s"
+# ## Optional TLS configuration.
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Enables client authentication if set.
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+# ## Maximum socket buffer size (in bytes when no unit specified).
+# # read_buffer_size = "64KiB"
+# ## Period between keep alive probes.
+# ## 0 disables keep alive probes.
+# ## Defaults to the OS configuration.
+# # keep_alive_period = "5m"
+
+
+# # SFlow V5 Protocol Listener
+# [[inputs.sflow]]
+# ## Address to listen for sFlow packets.
+# ## example: service_address = "udp://:6343"
+# ## service_address = "udp4://:6343"
+# ## service_address = "udp6://:6343"
+# service_address = "udp://:6343"
+#
+# ## Set the size of the operating system's receive buffer.
+# ## example: read_buffer_size = "64KiB" +# # read_buffer_size = "" + + +# # Receive SNMP traps +# [[inputs.snmp_trap]] +# ## Transport, local address, and port to listen on. Transport must +# ## be "udp://". Omit local address to listen on all interfaces. +# ## example: "udp://127.0.0.1:1234" +# ## +# ## Special permissions may be required to listen on a port less than +# ## 1024. See README.md for details +# ## +# # service_address = "udp://:162" +# ## +# ## Path to mib files +# # path = ["/usr/share/snmp/mibs"] +# ## +# ## Timeout running snmptranslate command +# # timeout = "5s" +# ## Snmp version, defaults to 2c +# # version = "2c" +# ## SNMPv3 authentication and encryption options. +# ## +# ## Security Name. +# # sec_name = "myuser" +# ## Authentication protocol; one of "MD5", "SHA" or "". +# # auth_protocol = "MD5" +# ## Authentication password. +# # auth_password = "pass" +# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". +# # sec_level = "authNoPriv" +# ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C" or "". +# # priv_protocol = "" +# ## Privacy password used for encrypted messages. +# # priv_password = "" + + +# # Generic socket listener capable of handling multiple socket types. +# [[inputs.socket_listener]] +# ## URL to listen on +# # service_address = "tcp://:8094" +# # service_address = "tcp://127.0.0.1:http" +# # service_address = "tcp4://:8094" +# # service_address = "tcp6://:8094" +# # service_address = "tcp6://[2001:db8::1]:8094" +# # service_address = "udp://:8094" +# # service_address = "udp4://:8094" +# # service_address = "udp6://:8094" +# # service_address = "unix:///tmp/telegraf.sock" +# # service_address = "unixgram:///tmp/telegraf.sock" +# +# ## Change the file mode bits on unix sockets. 
These permissions may not be +# ## respected by some platforms, to safely restrict write permissions it is best +# ## to place the socket into a directory that has previously been created +# ## with the desired permissions. +# ## ex: socket_mode = "777" +# # socket_mode = "" +# +# ## Maximum number of concurrent connections. +# ## Only applies to stream sockets (e.g. TCP). +# ## 0 (default) is unlimited. +# # max_connections = 1024 +# +# ## Read timeout. +# ## Only applies to stream sockets (e.g. TCP). +# ## 0 (default) is unlimited. +# # read_timeout = "30s" +# +# ## Optional TLS configuration. +# ## Only applies to stream sockets (e.g. TCP). +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Enables client authentication if set. +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Maximum socket buffer size (in bytes when no unit specified). +# ## For stream sockets, once the buffer fills up, the sender will start backing up. +# ## For datagram sockets, once the buffer fills up, metrics will start dropping. +# ## Defaults to the OS default. +# # read_buffer_size = "64KiB" +# +# ## Period between keep alive probes. +# ## Only applies to TCP sockets. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration. +# # keep_alive_period = "5m" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# # data_format = "influx" +# +# ## Content encoding for message payloads, can be set to "gzip" to or +# ## "identity" to apply no encoding. +# # content_encoding = "identity" + + +# # Read metrics from SQL queries +# [[inputs.sql]] +# ## Database Driver +# ## See https://github.com/influxdata/telegraf/blob/master/docs/SQL_DRIVERS_INPUT.md for +# ## a list of supported drivers. 
+# driver = "mysql"
+#
+# ## Data source name for connecting
+# ## The syntax and supported options depend on selected driver.
+# dsn = "username:password@mysqlserver:3307/dbname?param=value"
+#
+# ## Timeout for any operation
+# ## Note that the timeout for queries is per query not per gather.
+# # timeout = "5s"
+#
+# ## Connection time limits
+# ## By default the maximum idle time and maximum lifetime of a connection is unlimited, i.e. the connections
+# ## will not be closed automatically. If you specify a positive time, the connections will be closed after
+# ## idling or existing for at least that amount of time, respectively.
+# # connection_max_idle_time = "0s"
+# # connection_max_life_time = "0s"
+#
+# ## Connection count limits
+# ## By default the number of open connections is not limited and the number of maximum idle connections
+# ## will be inferred from the number of queries specified. If you specify a positive number for any of the
+# ## two options, connections will be closed when reaching the specified limit. The number of idle connections
+# ## will be clipped to the maximum number of connections limit if any.
+# # connection_max_open = 0
+# # connection_max_idle = auto
+#
+# [[inputs.sql.query]]
+# ## Query to perform on the server
+# query="SELECT user,state,latency,score FROM Scoreboard WHERE application > 0"
+# ## Alternatively to specifying the query directly you can select a file here containing the SQL query.
+# ## Only one of 'query' and 'query_script' can be specified!
+# # query_script = "/path/to/sql/script.sql"
+#
+# ## Name of the measurement
+# ## In case both measurement and 'measurement_col' are given, the latter takes precedence.
+# # measurement = "sql"
+#
+# ## Column name containing the name of the measurement
+# ## If given, this will take precedence over the 'measurement' setting. In case a query result
+# ## does not contain the specified column, we fall-back to the 'measurement' setting.
+# # measurement_column = "" +# +# ## Column name containing the time of the measurement +# ## If ommited, the time of the query will be used. +# # time_column = "" +# +# ## Format of the time contained in 'time_col' +# ## The time must be 'unix', 'unix_ms', 'unix_us', 'unix_ns', or a golang time format. +# ## See https://golang.org/pkg/time/#Time.Format for details. +# # time_format = "unix" +# +# ## Column names containing tags +# ## An empty include list will reject all columns and an empty exclude list will not exclude any column. +# ## I.e. by default no columns will be returned as tag and the tags are empty. +# # tag_columns_include = [] +# # tag_columns_exclude = [] +# +# ## Column names containing fields (explicit types) +# ## Convert the given columns to the corresponding type. Explicit type conversions take precedence over +# ## the automatic (driver-based) conversion below. +# ## NOTE: Columns should not be specified for multiple types or the resulting type is undefined. +# # field_columns_float = [] +# # field_columns_int = [] +# # field_columns_uint = [] +# # field_columns_bool = [] +# # field_columns_string = [] +# +# ## Column names containing fields (automatic types) +# ## An empty include list is equivalent to '[*]' and all returned columns will be accepted. An empty +# ## exclude list will not exclude any column. I.e. by default all columns will be returned as fields. +# ## NOTE: We rely on the database driver to perform automatic datatype conversion. +# # field_columns_include = [] +# # field_columns_exclude = [] + + +# # Read metrics from Microsoft SQL Server +# [[inputs.sqlserver]] +# ## Specify instances to monitor with a list of connection strings. +# ## All connection parameters are optional. +# ## By default, the host is localhost, listening on default port, TCP 1433. +# ## for Windows, the user is the currently running AD user (SSO). 
+# ## See https://github.com/denisenkom/go-mssqldb for detailed connection +# ## parameters, in particular, tls connections can be created like so: +# ## "encrypt=true;certificate=;hostNameInCertificate=" +# servers = [ +# "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", +# ] +# +# ## Authentication method +# ## valid methods: "connection_string", "AAD" +# # auth_method = "connection_string" +# +# ## "database_type" enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 +# ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. +# ## Possible values for database_type are - "AzureSQLDB" or "AzureSQLManagedInstance" or "SQLServer" +# +# ## Queries enabled by default for database_type = "AzureSQLDB" are - +# ## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties, +# ## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers +# +# # database_type = "AzureSQLDB" +# +# ## A list of queries to include. If not specified, all the above listed queries are used. +# # include_query = [] +# +# ## A list of queries to explicitly ignore. 
+# # exclude_query = [] +# +# ## Queries enabled by default for database_type = "AzureSQLManagedInstance" are - +# ## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats, +# ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers +# +# # database_type = "AzureSQLManagedInstance" +# +# # include_query = [] +# +# # exclude_query = [] +# +# ## Queries enabled by default for database_type = "SQLServer" are - +# ## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks, +# ## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu +# +# database_type = "SQLServer" +# +# include_query = [] +# +# ## SQLServerAvailabilityReplicaStates and SQLServerDatabaseReplicaStates are optional queries and hence excluded here as default +# exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"] +# +# ## Following are old config settings, you may use them only if you are using the earlier flavor of queries, however it is recommended to use +# ## the new mechanism of identifying the database_type there by use it's corresponding queries +# +# ## Optional parameter, setting this to 2 will use a new version +# ## of the collection queries that break compatibility with the original +# ## dashboards. 
+# ## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB +# # query_version = 2 +# +# ## If you are using AzureDB, setting this to true will gather resource utilization metrics +# # azuredb = false + + +# # Statsd UDP/TCP Server +# [[inputs.statsd]] +# ## Protocol, must be "tcp", "udp", "udp4" or "udp6" (default=udp) +# protocol = "udp" +# +# ## MaxTCPConnection - applicable when protocol is set to tcp (default=250) +# max_tcp_connections = 250 +# +# ## Enable TCP keep alive probes (default=false) +# tcp_keep_alive = false +# +# ## Specifies the keep-alive period for an active network connection. +# ## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false. +# ## Defaults to the OS configuration. +# # tcp_keep_alive_period = "2h" +# +# ## Address and port to host UDP listener on +# service_address = ":8125" +# +# ## The following configuration options control when telegraf clears it's cache +# ## of previous values. If set to false, then telegraf will only clear it's +# ## cache when the daemon is restarted. 
+# ## Reset gauges every interval (default=true) +# delete_gauges = true +# ## Reset counters every interval (default=true) +# delete_counters = true +# ## Reset sets every interval (default=true) +# delete_sets = true +# ## Reset timings & histograms every interval (default=true) +# delete_timings = true +# +# ## Percentiles to calculate for timing & histogram stats +# percentiles = [50.0, 90.0, 99.0, 99.9, 99.95, 100.0] +# +# ## separator to use between elements of a statsd metric +# metric_separator = "_" +# +# ## Parses tags in the datadog statsd format +# ## http://docs.datadoghq.com/guides/dogstatsd/ +# parse_data_dog_tags = false +# +# ## Parses datadog extensions to the statsd format +# datadog_extensions = false +# +# ## Parses distributions metric as specified in the datadog statsd format +# ## https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition +# datadog_distributions = false +# +# ## Statsd data translation templates, more info can be read here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md +# # templates = [ +# # "cpu.* measurement*" +# # ] +# +# ## Number of UDP messages allowed to queue up, once filled, +# ## the statsd server will start dropping packets +# allowed_pending_messages = 10000 +# +# ## Number of timing/histogram values to track per-measurement in the +# ## calculation of percentiles. Raising this limit increases the accuracy +# ## of percentiles but also increases the memory usage and cpu time. +# percentile_limit = 1000 +# +# ## Max duration (TTL) for each metric to stay cached/reported without being updated. +# #max_ttl = "1000h" + + +# # Suricata stats and alerts plugin +# [[inputs.suricata]] +# ## Data sink for Suricata stats and alerts logs +# # This is expected to be a filename of a +# # unix socket to be created for listening. +# source = "/var/run/suricata-stats.sock" +# +# # Delimiter for flattening field keys, e.g. 
subitem "alert" of "detect" +# # becomes "detect_alert" when delimiter is "_". +# delimiter = "_" +# +# ## Detect alert logs +# # alerts = false + + +# # Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587 +# [[inputs.syslog]] +# ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514 +# ## Protocol, address and port to host the syslog receiver. +# ## If no host is specified, then localhost is used. +# ## If no port is specified, 6514 is used (RFC5425#section-4.1). +# server = "tcp://:6514" +# +# ## TLS Config +# # tls_allowed_cacerts = ["/etc/telegraf/ca.pem"] +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Period between keep alive probes. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration. +# ## Only applies to stream sockets (e.g. TCP). +# # keep_alive_period = "5m" +# +# ## Maximum number of concurrent connections (default = 0). +# ## 0 means unlimited. +# ## Only applies to stream sockets (e.g. TCP). +# # max_connections = 1024 +# +# ## Read timeout is the maximum time allowed for reading a single message (default = 5s). +# ## 0 means unlimited. +# # read_timeout = "5s" +# +# ## The framing technique with which it is expected that messages are transported (default = "octet-counting"). +# ## Whether the messages come using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), +# ## or the non-transparent framing technique (RFC6587#section-3.4.2). +# ## Must be one of "octet-counting", "non-transparent". +# # framing = "octet-counting" +# +# ## The trailer to be expected in case of non-transparent framing (default = "LF"). +# ## Must be one of "LF", or "NUL". +# # trailer = "LF" +# +# ## Whether to parse in best effort mode or not (default = false). +# ## By default best effort parsing is off. +# # best_effort = false +# +# ## The RFC standard to use for message parsing +# ## By default RFC5424 is used. 
RFC3164 only supports UDP transport (no streaming support) +# ## Must be one of "RFC5424", or "RFC3164". +# # syslog_standard = "RFC5424" +# +# ## Character to prepend to SD-PARAMs (default = "_"). +# ## A syslog message can contain multiple parameters and multiple identifiers within structured data section. +# ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"] +# ## For each combination a field is created. +# ## Its name is created concatenating identifier, sdparam_separator, and parameter name. +# # sdparam_separator = "_" + + +# # Parse the new lines appended to a file +# [[inputs.tail]] +# ## File names or a pattern to tail. +# ## These accept standard unix glob matching rules, but with the addition of +# ## ** as a "super asterisk". ie: +# ## "/var/log/**.log" -> recursively find all .log files in /var/log +# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log +# ## "/var/log/apache.log" -> just tail the apache log file +# ## "/var/log/log[!1-2]* -> tail files without 1-2 +# ## "/var/log/log[^1-2]* -> identical behavior as above +# ## See https://github.com/gobwas/glob for more examples +# ## +# files = ["/var/mymetrics.out"] +# +# ## Read file from beginning. +# # from_beginning = false +# +# ## Whether file is a named pipe +# # pipe = false +# +# ## Method used to watch for file updates. Can be either "inotify" or "poll". +# # watch_method = "inotify" +# +# ## Maximum lines of the file to process that have not yet be written by the +# ## output. For best throughput set based on the number of metrics on each +# ## line and the size of the output's metric_batch_size. +# # max_undelivered_lines = 1000 +# +# ## Character encoding to use when interpreting the file contents. Invalid +# ## characters are replaced using the unicode replacement character. When set +# ## to the empty string the data is not decoded to text. 
+# ## ex: character_encoding = "utf-8" +# ## character_encoding = "utf-16le" +# ## character_encoding = "utf-16be" +# ## character_encoding = "" +# # character_encoding = "" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Set the tag that will contain the path of the tailed file. If you don't want this tag, set it to an empty string. +# # path_tag = "path" +# +# ## multiline parser/codec +# ## https://www.elastic.co/guide/en/logstash/2.4/plugins-filters-multiline.html +# #[inputs.tail.multiline] +# ## The pattern should be a regexp which matches what you believe to be an +# ## indicator that the field is part of an event consisting of multiple lines of log data. +# #pattern = "^\s" +# +# ## This field must be either "previous" or "next". +# ## If a line matches the pattern, "previous" indicates that it belongs to the previous line, +# ## whereas "next" indicates that the line belongs to the next one. +# #match_which_line = "previous" +# +# ## The invert_match field can be true or false (defaults to false). +# ## If true, a message not matching the pattern will constitute a match of the multiline +# ## filter and the what will be applied. (vice-versa is also true) +# #invert_match = false +# +# ## After the specified timeout, this plugin sends a multiline event even if no new pattern +# ## is found to start a new event. The default timeout is 5s. 
+# #timeout = 5s + + +# # Generic TCP listener +# [[inputs.tcp_listener]] +# # DEPRECATED: the TCP listener plugin has been deprecated in favor of the +# # socket_listener plugin +# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener + + +# # Generic UDP listener +# [[inputs.udp_listener]] +# # DEPRECATED: the TCP listener plugin has been deprecated in favor of the +# # socket_listener plugin +# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener + + +# # Read metrics from VMware vCenter +# [[inputs.vsphere]] +# ## List of vCenter URLs to be monitored. These three lines must be uncommented +# ## and edited for the plugin to work. +# vcenters = [ "https://vcenter.local/sdk" ] +# username = "user@corp.local" +# password = "secret" +# +# ## VMs +# ## Typical VM metrics (if omitted or empty, all metrics are collected) +# # vm_include = [ "/*/vm/**"] # Inventory path to VMs to collect (by default all are collected) +# # vm_exclude = [] # Inventory paths to exclude +# vm_metric_include = [ +# "cpu.demand.average", +# "cpu.idle.summation", +# "cpu.latency.average", +# "cpu.readiness.average", +# "cpu.ready.summation", +# "cpu.run.summation", +# "cpu.usagemhz.average", +# "cpu.used.summation", +# "cpu.wait.summation", +# "mem.active.average", +# "mem.granted.average", +# "mem.latency.average", +# "mem.swapin.average", +# "mem.swapinRate.average", +# "mem.swapout.average", +# "mem.swapoutRate.average", +# "mem.usage.average", +# "mem.vmmemctl.average", +# "net.bytesRx.average", +# "net.bytesTx.average", +# "net.droppedRx.summation", +# "net.droppedTx.summation", +# "net.usage.average", +# "power.power.average", +# "virtualDisk.numberReadAveraged.average", +# "virtualDisk.numberWriteAveraged.average", +# "virtualDisk.read.average", +# "virtualDisk.readOIO.latest", +# "virtualDisk.throughput.usage.average", +# "virtualDisk.totalReadLatency.average", +# "virtualDisk.totalWriteLatency.average", +# 
"virtualDisk.write.average", +# "virtualDisk.writeOIO.latest", +# "sys.uptime.latest", +# ] +# # vm_metric_exclude = [] ## Nothing is excluded by default +# # vm_instances = true ## true by default +# +# ## Hosts +# ## Typical host metrics (if omitted or empty, all metrics are collected) +# # host_include = [ "/*/host/**"] # Inventory path to hosts to collect (by default all are collected) +# # host_exclude [] # Inventory paths to exclude +# host_metric_include = [ +# "cpu.coreUtilization.average", +# "cpu.costop.summation", +# "cpu.demand.average", +# "cpu.idle.summation", +# "cpu.latency.average", +# "cpu.readiness.average", +# "cpu.ready.summation", +# "cpu.swapwait.summation", +# "cpu.usage.average", +# "cpu.usagemhz.average", +# "cpu.used.summation", +# "cpu.utilization.average", +# "cpu.wait.summation", +# "disk.deviceReadLatency.average", +# "disk.deviceWriteLatency.average", +# "disk.kernelReadLatency.average", +# "disk.kernelWriteLatency.average", +# "disk.numberReadAveraged.average", +# "disk.numberWriteAveraged.average", +# "disk.read.average", +# "disk.totalReadLatency.average", +# "disk.totalWriteLatency.average", +# "disk.write.average", +# "mem.active.average", +# "mem.latency.average", +# "mem.state.latest", +# "mem.swapin.average", +# "mem.swapinRate.average", +# "mem.swapout.average", +# "mem.swapoutRate.average", +# "mem.totalCapacity.average", +# "mem.usage.average", +# "mem.vmmemctl.average", +# "net.bytesRx.average", +# "net.bytesTx.average", +# "net.droppedRx.summation", +# "net.droppedTx.summation", +# "net.errorsRx.summation", +# "net.errorsTx.summation", +# "net.usage.average", +# "power.power.average", +# "storageAdapter.numberReadAveraged.average", +# "storageAdapter.numberWriteAveraged.average", +# "storageAdapter.read.average", +# "storageAdapter.write.average", +# "sys.uptime.latest", +# ] +# ## Collect IP addresses? 
Valid values are "ipv4" and "ipv6" +# # ip_addresses = ["ipv6", "ipv4" ] +# +# # host_metric_exclude = [] ## Nothing excluded by default +# # host_instances = true ## true by default +# +# +# ## Clusters +# # cluster_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected) +# # cluster_exclude = [] # Inventory paths to exclude +# # cluster_metric_include = [] ## if omitted or empty, all metrics are collected +# # cluster_metric_exclude = [] ## Nothing excluded by default +# # cluster_instances = false ## false by default +# +# ## Datastores +# # datastore_include = [ "/*/datastore/**"] # Inventory path to datastores to collect (by default all are collected) +# # datastore_exclude = [] # Inventory paths to exclude +# # datastore_metric_include = [] ## if omitted or empty, all metrics are collected +# # datastore_metric_exclude = [] ## Nothing excluded by default +# # datastore_instances = false ## false by default +# +# ## Datacenters +# # datacenter_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected) +# # datacenter_exclude = [] # Inventory paths to exclude +# datacenter_metric_include = [] ## if omitted or empty, all metrics are collected +# datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default. 
+# # datacenter_instances = false ## false by default +# +# ## Plugin Settings +# ## separator character to use for measurement and field names (default: "_") +# # separator = "_" +# +# ## number of objects to retrieve per query for realtime resources (vms and hosts) +# ## set to 64 for vCenter 5.5 and 6.0 (default: 256) +# # max_query_objects = 256 +# +# ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores) +# ## set to 64 for vCenter 5.5 and 6.0 (default: 256) +# # max_query_metrics = 256 +# +# ## number of go routines to use for collection and discovery of objects and metrics +# # collect_concurrency = 1 +# # discover_concurrency = 1 +# +# ## the interval before (re)discovering objects subject to metrics collection (default: 300s) +# # object_discovery_interval = "300s" +# +# ## timeout applies to any of the api request made to vcenter +# # timeout = "60s" +# +# ## When set to true, all samples are sent as integers. This makes the output +# ## data types backwards compatible with Telegraf 1.9 or lower. Normally all +# ## samples from vCenter, with the exception of percentages, are integer +# ## values, but under some conditions, some averaging takes place internally in +# ## the plugin. Setting this flag to "false" will send values as floats to +# ## preserve the full precision when averaging takes place. +# # use_int_samples = true +# +# ## Custom attributes from vCenter can be very useful for queries in order to slice the +# ## metrics along different dimension and for forming ad-hoc relationships. They are disabled +# ## by default, since they can add a considerable amount of tags to the resulting metrics. To +# ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include +# ## to select the attributes you want to include. +# ## By default, since they can add a considerable amount of tags to the resulting metrics. 
To +# ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include +# ## to select the attributes you want to include. +# # custom_attribute_include = [] +# # custom_attribute_exclude = ["*"] +# +# ## The number of vSphere 5 minute metric collection cycles to look back for non-realtime metrics. In +# ## some versions (6.7, 7.0 and possible more), certain metrics, such as cluster metrics, may be reported +# ## with a significant delay (>30min). If this happens, try increasing this number. Please note that increasing +# ## it too much may cause performance issues. +# # metric_lookback = 3 +# +# ## Optional SSL Config +# # ssl_ca = "/path/to/cafile" +# # ssl_cert = "/path/to/certfile" +# # ssl_key = "/path/to/keyfile" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false +# +# ## The Historical Interval value must match EXACTLY the interval in the daily +# # "Interval Duration" found on the VCenter server under Configure > General > Statistics > Statistic intervals +# # historical_interval = "5m" + + +# # A Webhooks Event collector +# [[inputs.webhooks]] +# ## Address and port to host Webhook listener on +# service_address = ":1619" +# +# [inputs.webhooks.filestack] +# path = "/filestack" +# +# [inputs.webhooks.github] +# path = "/github" +# # secret = "" +# +# [inputs.webhooks.mandrill] +# path = "/mandrill" +# +# [inputs.webhooks.rollbar] +# path = "/rollbar" +# +# [inputs.webhooks.papertrail] +# path = "/papertrail" +# +# [inputs.webhooks.particle] +# path = "/particle" + + +# # This plugin implements the Zipkin http server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures. 
+# [[inputs.zipkin]] +# # path = "/api/v1/spans" # URL path for span data +# # port = 9411 # Port on which Telegraf listens + From 1a9b3ad476e3a04761dda233186548b5ca95a7b4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 6 Oct 2021 12:13:49 -0500 Subject: [PATCH 111/176] fix: bump k8s.io/apimachinery from 0.21.1 to 0.22.2 (#9776) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Sebastian Spaink --- go.mod | 8 ++++---- go.sum | 30 +++++++++++++++++++----------- 2 files changed, 23 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index 55de18a59efa0..c6f3138489d28 100644 --- a/go.mod +++ b/go.mod @@ -121,7 +121,7 @@ require ( github.com/google/gofuzz v1.1.0 // indirect github.com/google/uuid v1.2.0 // indirect github.com/googleapis/gax-go/v2 v2.0.5 // indirect - github.com/googleapis/gnostic v0.4.1 // indirect + github.com/googleapis/gnostic v0.5.5 // indirect github.com/gopcua/opcua v0.2.0-rc2.0.20210409063412-baabb9b14fd2 github.com/gorilla/mux v1.7.3 github.com/gorilla/websocket v1.4.2 @@ -315,9 +315,9 @@ require ( gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect gotest.tools v2.2.0+incompatible k8s.io/api v0.20.6 - k8s.io/apimachinery v0.21.1 + k8s.io/apimachinery v0.22.2 k8s.io/client-go v0.20.6 - k8s.io/klog/v2 v2.8.0 // indirect + k8s.io/klog/v2 v2.9.0 // indirect k8s.io/utils v0.0.0-20201110183641-67b214c5f920 // indirect modernc.org/cc/v3 v3.33.5 // indirect modernc.org/ccgo/v3 v3.9.4 // indirect @@ -328,7 +328,7 @@ require ( modernc.org/sqlite v1.10.8 modernc.org/strutil v1.1.0 // indirect modernc.org/token v1.0.0 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.1.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect sigs.k8s.io/yaml v1.2.0 // indirect ) diff --git a/go.sum b/go.sum index fccbfb9b9bdcb..a8fc62a7b3874 100644 --- a/go.sum +++ b/go.sum @@ -586,6 +586,7 @@ 
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 github.com/evanphx/json-patch v0.0.0-20200808040245-162e5629780b/go.mod h1:NAJj0yf/KaRKURN6nyi7A9IZydMivZEm9oQLWNjfKDc= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= github.com/facebookgo/stackerr v0.0.0-20150612192056-c2fcf88613f4 h1:fP04zlkPjAGpsduG7xN3rRkxjAqkJaIQnnkNYYw/pAk= @@ -864,8 +865,10 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.4.0/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= -github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= +github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/gopcua/opcua v0.2.0-rc2.0.20210409063412-baabb9b14fd2 h1:OtFKr0Kwe1oLpMR+uNMh/DPgC5fxAq4xRe6HBv8LDqQ= github.com/gopcua/opcua v0.2.0-rc2.0.20210409063412-baabb9b14fd2/go.mod h1:a6QH4F9XeODklCmWuvaOdL8v9H0d73CEKUHWVZLQyE8= github.com/gophercloud/gophercloud v0.1.0/go.mod 
h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= @@ -1294,14 +1297,16 @@ github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= @@ -1559,6 +1564,7 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/stefanberger/go-pkcs11uri 
v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 h1:WhxRHzgeVGETMlmVfqhRn8RIeeNoPr2Czh33I4Zdccw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ -1858,6 +1864,7 @@ golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -1874,7 +1881,6 @@ golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= @@ -1882,6 +1888,7 @@ golang.org/x/net v0.0.0-20210331212208-0fccb6fa2b5c/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b h1:k+E048sYJHyVnsr1GDrRZWQ32D2C7lWs9JRc0bel53A= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1986,6 +1993,7 @@ golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2017,7 +2025,6 @@ 
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2241,6 +2248,7 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200815001618-f69a88009b70/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -2411,8 +2419,8 @@ k8s.io/apimachinery v0.18.8/go.mod h1:6sQd+iHEqmOtALqOFjSWp2KZ9F0wlU/nWm0ZgsYWMi k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.4/go.mod 
h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= -k8s.io/apimachinery v0.21.1 h1:Q6XuHGlj2xc+hlMCvqyYfbv3H7SRGn2c8NycxJquDVs= -k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= +k8s.io/apimachinery v0.22.2 h1:ejz6y/zNma8clPVfNDLnPbleBo6MpoFy/HBiBqCouVk= +k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= @@ -2436,11 +2444,11 @@ k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= -k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM= +k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= -k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= +k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod 
h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= @@ -2485,8 +2493,8 @@ sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8= -sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= From 06edde61610679ed396ceda2f165e70e39f46d66 Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Wed, 6 Oct 2021 15:12:02 -0600 Subject: [PATCH 112/176] Update changelog (cherry picked from commit ca61e202f9b0bd19833cfa37638d7946972339c6) --- CHANGELOG.md | 25 +++++++++++++++++++++++++ etc/telegraf.conf | 15 --------------- 2 files changed, 25 insertions(+), 15 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2ebccd4849220..7360ff5a687d3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,28 @@ +## v1.20.1 [2021-10-06] + +#### Bugfixes + + - [#9776](https://github.com/influxdata/telegraf/pull/9776) Update k8s.io/apimachinery module from 0.21.1 to 0.22.2 + - [#9864](https://github.com/influxdata/telegraf/pull/9864) Update containerd module to v1.5.7 + - [#9863](https://github.com/influxdata/telegraf/pull/9863) Update consul module to v1.11.0 + - [#9846](https://github.com/influxdata/telegraf/pull/9846) 
`inputs.mongodb` Fix panic due to nil dereference + - [#9850](https://github.com/influxdata/telegraf/pull/9850) `inputs.intel_rdt` Prevent timeout when logging + - [#9848](https://github.com/influxdata/telegraf/pull/9848) `outputs.loki` Update http_headers setting to match sample config + - [#9808](https://github.com/influxdata/telegraf/pull/9808) `inputs.procstat` Add missing tags + - [#9803](https://github.com/influxdata/telegraf/pull/9803) `outputs.mqtt` Add keep alive config option and documentation around issue with eclipse/mosquitto version + - [#9800](https://github.com/influxdata/telegraf/pull/9800) Fix output buffer never completely flushing + - [#9458](https://github.com/influxdata/telegraf/pull/9458) `inputs.couchbase` Fix insecure certificate validation + - [#9797](https://github.com/influxdata/telegraf/pull/9797) `inputs.opentelemetry` Fix error returned to OpenTelemetry client + - [#9789](https://github.com/influxdata/telegraf/pull/9789) Update github.com/testcontainers/testcontainers-go module from 0.11.0 to 0.11.1 + - [#9791](https://github.com/influxdata/telegraf/pull/9791) Update github.com/Azure/go-autorest/autorest/adal module + - [#9678](https://github.com/influxdata/telegraf/pull/9678) Update github.com/Azure/go-autorest/autorest/azure/auth module from 0.5.6 to 0.5.8 + - [#9769](https://github.com/influxdata/telegraf/pull/9769) Update cloud.google.com/go/pubsub module from 1.15.0 to 1.17.0 + - [#9770](https://github.com/influxdata/telegraf/pull/9770) Update github.com/aws/smithy-go module from 1.3.1 to 1.8.0 + +#### Features + + - [#9838](https://github.com/influxdata/telegraf/pull/9838) `inputs.elasticsearch_query` Add custom time/date format field + ## v1.20.0 [2021-09-17] #### Release Notes diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 5564bc23ac8aa..2f2dce2f61df6 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -765,9 +765,6 @@ # ## Endpoints for your graylog instances. 
# servers = ["udp://127.0.0.1:12201"] # -# ## Connection timeout. -# # timeout = "5s" -# # ## The field to use as the GELF short_message, if unset the static string # ## "telegraf" will be used. # ## example: short_message_field = "message" @@ -4801,12 +4798,6 @@ # # ] -# # Read metrics about LVM physical volumes, volume groups, logical volumes. -# [[inputs.lvm]] -# ## Use sudo to run LVM commands -# use_sudo = false - - # # Gathers metrics from the /3.0/reports MailChimp API # [[inputs.mailchimp]] # ## MailChimp API key @@ -5501,12 +5492,6 @@ # ## Password. Required for auth_method = "UserName" # # password = "" # # -# ## Option to select the metric timestamp to use. Valid options are: -# ## "gather" -- uses the time of receiving the data in telegraf -# ## "server" -- uses the timestamp provided by the server -# ## "source" -- uses the timestamp provided by the source -# # timestamp = "gather" -# # # ## Node ID configuration # ## name - field name to use in the output # ## namespace - OPC UA namespace of the node (integer value 0 thru 3) From 6b51697ef01a3faa04dedb3e433f95f8281ec29d Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Thu, 7 Oct 2021 10:19:53 -0600 Subject: [PATCH 113/176] fix: set location for timezone on failing time tests (#9877) Resolves: #9874 --- internal/internal_test.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/internal/internal_test.go b/internal/internal_test.go index 8dae73f562702..24fdb91bb2ebc 100644 --- a/internal/internal_test.go +++ b/internal/internal_test.go @@ -514,6 +514,7 @@ func TestParseTimestamp(t *testing.T) { format: "UnixDate", timestamp: "Mon Jan 2 15:04:05 MST 2006", expected: unixdate("Mon Jan 2 15:04:05 MST 2006"), + location: "Local", }, { @@ -521,6 +522,7 @@ func TestParseTimestamp(t *testing.T) { format: "RubyDate", timestamp: "Mon Jan 02 15:04:05 -0700 2006", expected: rubydate("Mon Jan 02 15:04:05 -0700 2006"), + location: "Local", }, { @@ -528,6 +530,7 @@ func TestParseTimestamp(t *testing.T) { 
format: "RFC822", timestamp: "02 Jan 06 15:04 MST", expected: rfc822("02 Jan 06 15:04 MST"), + location: "Local", }, { @@ -535,6 +538,7 @@ func TestParseTimestamp(t *testing.T) { format: "RFC822Z", timestamp: "02 Jan 06 15:04 -0700", expected: rfc822z("02 Jan 06 15:04 -0700"), + location: "Local", }, { @@ -542,6 +546,7 @@ func TestParseTimestamp(t *testing.T) { format: "RFC850", timestamp: "Monday, 02-Jan-06 15:04:05 MST", expected: rfc850("Monday, 02-Jan-06 15:04:05 MST"), + location: "Local", }, { @@ -549,6 +554,7 @@ func TestParseTimestamp(t *testing.T) { format: "RFC1123", timestamp: "Mon, 02 Jan 2006 15:04:05 MST", expected: rfc1123("Mon, 02 Jan 2006 15:04:05 MST"), + location: "Local", }, { @@ -556,6 +562,7 @@ func TestParseTimestamp(t *testing.T) { format: "RFC1123Z", timestamp: "Mon, 02 Jan 2006 15:04:05 -0700", expected: rfc1123z("Mon, 02 Jan 2006 15:04:05 -0700"), + location: "Local", }, { @@ -563,6 +570,7 @@ func TestParseTimestamp(t *testing.T) { format: "RFC3339Nano", timestamp: "2006-01-02T15:04:05.999999999-07:00", expected: rfc3339nano("2006-01-02T15:04:05.999999999-07:00"), + location: "Local", }, { From 282ec85cd7027d021bf7baa09c6b10fd2a5df134 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Thu, 7 Oct 2021 12:34:14 -0600 Subject: [PATCH 114/176] fix: makefile missing space for i386 tar and rpm (#9887) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 12267c04403bb..143a55d58a757 100644 --- a/Makefile +++ b/Makefile @@ -271,7 +271,7 @@ ppc64le += linux_ppc64le.tar.gz ppc64le.rpm ppc64el.deb .PHONY: ppc64le ppc64le: @ echo $(ppc64le) -i386 += freebsd_i386.tar.gz i386.deb linux_i386.tar.gzi386.rpm +i386 += freebsd_i386.tar.gz i386.deb linux_i386.tar.gz i386.rpm .PHONY: i386 i386: @ echo $(i386) From 15753a6f7fa065248a19d534582fc7b79e0afe12 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Thu, 7 Oct 2021 14:47:59 -0500 Subject: [PATCH 115/176] 
fix: migrate to cloud.google.com/go/monitoring/apiv3/v2 (#9880) --- plugins/inputs/stackdriver/stackdriver.go | 6 +++--- plugins/outputs/stackdriver/stackdriver.go | 4 ++-- plugins/outputs/stackdriver/stackdriver_test.go | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/plugins/inputs/stackdriver/stackdriver.go b/plugins/inputs/stackdriver/stackdriver.go index 885913f91dd1c..cc8b1a40a10a5 100644 --- a/plugins/inputs/stackdriver/stackdriver.go +++ b/plugins/inputs/stackdriver/stackdriver.go @@ -9,7 +9,7 @@ import ( "sync" "time" - monitoring "cloud.google.com/go/monitoring/apiv3" + monitoring "cloud.google.com/go/monitoring/apiv3/v2" googlepbduration "github.com/golang/protobuf/ptypes/duration" googlepbts "github.com/golang/protobuf/ptypes/timestamp" "github.com/influxdata/telegraf" @@ -397,7 +397,7 @@ func (s *Stackdriver) newTimeSeriesConf( StartTime: &googlepbts.Timestamp{Seconds: startTime.Unix()}, } tsReq := &monitoringpb.ListTimeSeriesRequest{ - Name: monitoring.MetricProjectPath(s.Project), + Name: fmt.Sprintf("projects/%s", s.Project), Filter: filter, Interval: interval, } @@ -533,7 +533,7 @@ func (s *Stackdriver) generatetimeSeriesConfs( ret := []*timeSeriesConf{} req := &monitoringpb.ListMetricDescriptorsRequest{ - Name: monitoring.MetricProjectPath(s.Project), + Name: fmt.Sprintf("projects/%s", s.Project), } filters := s.newListMetricDescriptorsFilters() diff --git a/plugins/outputs/stackdriver/stackdriver.go b/plugins/outputs/stackdriver/stackdriver.go index 4d561a27b5007..d4f660ff7c569 100644 --- a/plugins/outputs/stackdriver/stackdriver.go +++ b/plugins/outputs/stackdriver/stackdriver.go @@ -9,7 +9,7 @@ import ( "sort" "strings" - monitoring "cloud.google.com/go/monitoring/apiv3" // Imports the Stackdriver Monitoring client package. + monitoring "cloud.google.com/go/monitoring/apiv3/v2" // Imports the Stackdriver Monitoring client package. 
googlepb "github.com/golang/protobuf/ptypes/timestamp" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" @@ -218,7 +218,7 @@ func (s *Stackdriver) Write(metrics []telegraf.Metric) error { // Prepare time series request. timeSeriesRequest := &monitoringpb.CreateTimeSeriesRequest{ - Name: monitoring.MetricProjectPath(s.Project), + Name: fmt.Sprintf("projects/%s", s.Project), TimeSeries: timeSeries, } diff --git a/plugins/outputs/stackdriver/stackdriver_test.go b/plugins/outputs/stackdriver/stackdriver_test.go index 7ddaa44854620..8af553b374c53 100644 --- a/plugins/outputs/stackdriver/stackdriver_test.go +++ b/plugins/outputs/stackdriver/stackdriver_test.go @@ -11,7 +11,7 @@ import ( "testing" "time" - monitoring "cloud.google.com/go/monitoring/apiv3" + monitoring "cloud.google.com/go/monitoring/apiv3/v2" "github.com/golang/protobuf/proto" emptypb "github.com/golang/protobuf/ptypes/empty" googlepb "github.com/golang/protobuf/ptypes/timestamp" From 128ed8849b16239707a9bedd059029792aceac53 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Thu, 7 Oct 2021 16:35:44 -0400 Subject: [PATCH 116/176] fix: memory leak in influx parser (#9787) --- plugins/parsers/influx/machine.go | 14 +++++++------- plugins/parsers/influx/machine.go.rl | 14 +++++++------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/plugins/parsers/influx/machine.go b/plugins/parsers/influx/machine.go index 2649cdb42bc37..4bbf8c079476b 100644 --- a/plugins/parsers/influx/machine.go +++ b/plugins/parsers/influx/machine.go @@ -3747,13 +3747,6 @@ func (m *streamMachine) Next() error { m.machine.finishMetric = false for { - // Expand the buffer if it is full - if m.machine.pe == len(m.machine.data) { - expanded := make([]byte, 2*len(m.machine.data)) - copy(expanded, m.machine.data) - m.machine.data = expanded - } - err := m.machine.exec() if err != nil { return err @@ -3764,6 +3757,13 @@ func (m *streamMachine) Next() error { break } + // Expand the buffer if it is full 
+ if m.machine.pe == len(m.machine.data) { + expanded := make([]byte, 2*len(m.machine.data)) + copy(expanded, m.machine.data) + m.machine.data = expanded + } + n, err := m.reader.Read(m.machine.data[m.machine.pe:]) if n == 0 && err == io.EOF { m.machine.eof = m.machine.pe diff --git a/plugins/parsers/influx/machine.go.rl b/plugins/parsers/influx/machine.go.rl index 29f4307860ea2..d6b5d949e4065 100644 --- a/plugins/parsers/influx/machine.go.rl +++ b/plugins/parsers/influx/machine.go.rl @@ -499,13 +499,6 @@ func (m *streamMachine) Next() error { m.machine.finishMetric = false for { - // Expand the buffer if it is full - if m.machine.pe == len(m.machine.data) { - expanded := make([]byte, 2 * len(m.machine.data)) - copy(expanded, m.machine.data) - m.machine.data = expanded - } - err := m.machine.exec() if err != nil { return err @@ -516,6 +509,13 @@ func (m *streamMachine) Next() error { break } + // Expand the buffer if it is full + if m.machine.pe == len(m.machine.data) { + expanded := make([]byte, 2 * len(m.machine.data)) + copy(expanded, m.machine.data) + m.machine.data = expanded + } + n, err := m.reader.Read(m.machine.data[m.machine.pe:]) if n == 0 && err == io.EOF { m.machine.eof = m.machine.pe From d06b387528f36909515b0d27cf333f6ef60f430d Mon Sep 17 00:00:00 2001 From: etycomputer <57578566+etycomputer@users.noreply.github.com> Date: Fri, 8 Oct 2021 06:38:20 +1000 Subject: [PATCH 117/176] feat: Adds the ability to create and name a tag containing the filename using the directory monitor input plugin (#9860) Co-authored-by: Ehsan Yazdi --- plugins/inputs/directory_monitor/README.md | 6 ++ .../directory_monitor/directory_monitor.go | 17 +++++- .../directory_monitor_test.go | 59 +++++++++++++++++++ plugins/inputs/file/README.md | 5 +- plugins/inputs/file/file.go | 6 +- 5 files changed, 89 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/directory_monitor/README.md b/plugins/inputs/directory_monitor/README.md index 66d9eb51fce79..4e260f44256ed 100644 --- 
a/plugins/inputs/directory_monitor/README.md +++ b/plugins/inputs/directory_monitor/README.md @@ -39,6 +39,12 @@ This plugin is intended to read files that are moved or copied to the monitored ## Lowering this value will result in *slightly* less memory use, with a potential sacrifice in speed efficiency, if absolutely necessary. # file_queue_size = 100000 # + ## Name a tag containing the name of the file the data was parsed from. Leave empty + ## to disable. Cautious when file name variation is high, this can increase the cardinality + ## significantly. Read more about cardinality here: + ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality + # file_tag = "" + # ## The dataformat to be read from the files. ## Each data format has its own unique set of configuration options, read ## more about them here: diff --git a/plugins/inputs/directory_monitor/directory_monitor.go b/plugins/inputs/directory_monitor/directory_monitor.go index a58c039422757..ee1163e7a51b1 100644 --- a/plugins/inputs/directory_monitor/directory_monitor.go +++ b/plugins/inputs/directory_monitor/directory_monitor.go @@ -55,6 +55,12 @@ const sampleConfig = ` ## Lowering this value will result in *slightly* less memory use, with a potential sacrifice in speed efficiency, if absolutely necessary. # file_queue_size = 100000 # + ## Name a tag containing the name of the file the data was parsed from. Leave empty + ## to disable. Cautious when file name variation is high, this can increase the cardinality + ## significantly. Read more about cardinality here: + ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality + # file_tag = "" + # ## The dataformat to be read from the files. 
## Each data format has its own unique set of configuration options, read ## more about them here: @@ -75,6 +81,7 @@ type DirectoryMonitor struct { Directory string `toml:"directory"` FinishedDirectory string `toml:"finished_directory"` ErrorDirectory string `toml:"error_directory"` + FileTag string `toml:"file_tag"` FilesToMonitor []string `toml:"files_to_monitor"` FilesToIgnore []string `toml:"files_to_ignore"` @@ -250,10 +257,10 @@ func (monitor *DirectoryMonitor) ingestFile(filePath string) error { reader = file } - return monitor.parseFile(parser, reader) + return monitor.parseFile(parser, reader, file.Name()) } -func (monitor *DirectoryMonitor) parseFile(parser parsers.Parser, reader io.Reader) error { +func (monitor *DirectoryMonitor) parseFile(parser parsers.Parser, reader io.Reader, fileName string) error { // Read the file line-by-line and parse with the configured parse method. firstLine := true scanner := bufio.NewScanner(reader) @@ -264,6 +271,12 @@ func (monitor *DirectoryMonitor) parseFile(parser parsers.Parser, reader io.Read } firstLine = false + if monitor.FileTag != "" { + for _, m := range metrics { + m.AddTag(monitor.FileTag, filepath.Base(fileName)) + } + } + if err := monitor.sendMetrics(metrics); err != nil { return err } diff --git a/plugins/inputs/directory_monitor/directory_monitor_test.go b/plugins/inputs/directory_monitor/directory_monitor_test.go index 7cda5f2d7b639..3e954adb40320 100644 --- a/plugins/inputs/directory_monitor/directory_monitor_test.go +++ b/plugins/inputs/directory_monitor/directory_monitor_test.go @@ -134,3 +134,62 @@ func TestMultipleJSONFileImports(t *testing.T) { // Verify that we read each JSON line once to a single metric. require.Equal(t, len(acc.Metrics), 5) } + +func TestFileTag(t *testing.T) { + acc := testutil.Accumulator{} + testJSONFile := "test.json" + + // Establish process directory and finished directory. 
+ finishedDirectory, err := os.MkdirTemp("", "finished") + require.NoError(t, err) + processDirectory, err := os.MkdirTemp("", "test") + require.NoError(t, err) + defer os.RemoveAll(processDirectory) + defer os.RemoveAll(finishedDirectory) + + // Init plugin. + r := DirectoryMonitor{ + Directory: processDirectory, + FinishedDirectory: finishedDirectory, + FileTag: "filename", + MaxBufferedMetrics: 1000, + FileQueueSize: 1000, + } + err = r.Init() + require.NoError(t, err) + + parserConfig := parsers.Config{ + DataFormat: "json", + JSONNameKey: "Name", + } + + r.SetParserFunc(func() (parsers.Parser, error) { + return parsers.NewParser(&parserConfig) + }) + + // Let's drop a 1-line LINE-DELIMITED json. + // Write csv file to process into the 'process' directory. + f, err := os.Create(filepath.Join(processDirectory, testJSONFile)) + require.NoError(t, err) + _, err = f.WriteString("{\"Name\": \"event1\",\"Speed\": 100.1,\"Length\": 20.1}") + require.NoError(t, err) + err = f.Close() + require.NoError(t, err) + + err = r.Start(&acc) + r.Log = testutil.Logger{} + require.NoError(t, err) + err = r.Gather(&acc) + require.NoError(t, err) + acc.Wait(1) + r.Stop() + + // Verify that we read each JSON line once to a single metric. + require.Equal(t, len(acc.Metrics), 1) + for _, m := range acc.Metrics { + for key, value := range m.Tags { + require.Equal(t, r.FileTag, key) + require.Equal(t, filepath.Base(testJSONFile), value) + } + } +} diff --git a/plugins/inputs/file/README.md b/plugins/inputs/file/README.md index ef0fb90b0796c..8ec406da7be3d 100644 --- a/plugins/inputs/file/README.md +++ b/plugins/inputs/file/README.md @@ -20,8 +20,11 @@ plugin instead. ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" + ## Name a tag containing the name of the file the data was parsed from. Leave empty - ## to disable. + ## to disable. Cautious when file name variation is high, this can increase the cardinality + ## significantly. 
Read more about cardinality here: + ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality # file_tag = "" ``` diff --git a/plugins/inputs/file/file.go b/plugins/inputs/file/file.go index 22af282dbde0a..fbfc536a6d874 100644 --- a/plugins/inputs/file/file.go +++ b/plugins/inputs/file/file.go @@ -29,9 +29,13 @@ const sampleConfig = ` ## as well as ** to match recursive files and directories. files = ["/tmp/metrics.out"] + ## Name a tag containing the name of the file the data was parsed from. Leave empty - ## to disable. + ## to disable. Cautious when file name variation is high, this can increase the cardinality + ## significantly. Read more about cardinality here: + ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality # file_tag = "" + # ## Character encoding to use when interpreting the file contents. Invalid ## characters are replaced using the unicode replacement character. When set From 9de8c987f23c13aeb8cf1cbe365e6e55630d6df6 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Thu, 7 Oct 2021 15:41:59 -0500 Subject: [PATCH 118/176] fix: mute graylog UDP/TCP tests by marking them as integration (#9881) --- plugins/outputs/graylog/graylog_test.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/plugins/outputs/graylog/graylog_test.go b/plugins/outputs/graylog/graylog_test.go index faa5b34b908d7..fcf61ae77d51e 100644 --- a/plugins/outputs/graylog/graylog_test.go +++ b/plugins/outputs/graylog/graylog_test.go @@ -15,14 +15,26 @@ import ( ) func TestWriteDefault(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + scenarioUDP(t, "127.0.0.1:12201") } func TestWriteUDP(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + scenarioUDP(t, "udp://127.0.0.1:12201") } func TestWriteTCP(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + scenarioTCP(t, 
"tcp://127.0.0.1:12201") } From da5727e34cae3b85a4840c827575978092fd040b Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Thu, 7 Oct 2021 15:45:02 -0500 Subject: [PATCH 119/176] fix: duplicate line_protocol when using object and fields (#9872) --- plugins/parsers/json_v2/parser.go | 2 +- plugins/parsers/json_v2/parser_test.go | 4 ++ .../mix_field_and_object/expected.out | 1 + .../testdata/mix_field_and_object/input.json | 44 +++++++++++++++++++ .../mix_field_and_object/telegraf.conf | 15 +++++++ 5 files changed, 65 insertions(+), 1 deletion(-) create mode 100644 plugins/parsers/json_v2/testdata/mix_field_and_object/expected.out create mode 100644 plugins/parsers/json_v2/testdata/mix_field_and_object/input.json create mode 100644 plugins/parsers/json_v2/testdata/mix_field_and_object/telegraf.conf diff --git a/plugins/parsers/json_v2/parser.go b/plugins/parsers/json_v2/parser.go index f4f84c562e781..ebeb6545ba549 100644 --- a/plugins/parsers/json_v2/parser.go +++ b/plugins/parsers/json_v2/parser.go @@ -137,7 +137,7 @@ func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { metrics = append(metrics, cartesianProduct(tags, fields)...) if len(objects) != 0 && len(metrics) != 0 { - metrics = append(metrics, cartesianProduct(objects, metrics)...) + metrics = cartesianProduct(objects, metrics) } else { metrics = append(metrics, objects...) 
} diff --git a/plugins/parsers/json_v2/parser_test.go b/plugins/parsers/json_v2/parser_test.go index 3ef08856190ac..686bf826ad9d7 100644 --- a/plugins/parsers/json_v2/parser_test.go +++ b/plugins/parsers/json_v2/parser_test.go @@ -20,6 +20,10 @@ func TestData(t *testing.T) { name string test string }{ + { + name: "Test when using field and object together", + test: "mix_field_and_object", + }, { name: "Test complex nesting", test: "complex_nesting", diff --git a/plugins/parsers/json_v2/testdata/mix_field_and_object/expected.out b/plugins/parsers/json_v2/testdata/mix_field_and_object/expected.out new file mode 100644 index 0000000000000..e7f0e222418aa --- /dev/null +++ b/plugins/parsers/json_v2/testdata/mix_field_and_object/expected.out @@ -0,0 +1 @@ +openweather,id=2.643743e+06,name=London coord_lat=51.5085,coord_lon=-0.1257,description="few clouds",main_temp=12.54,summary="Clouds",wind_speed=2.11 1628186541000000000 diff --git a/plugins/parsers/json_v2/testdata/mix_field_and_object/input.json b/plugins/parsers/json_v2/testdata/mix_field_and_object/input.json new file mode 100644 index 0000000000000..402113af8ca9e --- /dev/null +++ b/plugins/parsers/json_v2/testdata/mix_field_and_object/input.json @@ -0,0 +1,44 @@ +{ + "coord": { + "lon": -0.1257, + "lat": 51.5085 + }, + "weather": [ + { + "id": 801, + "main": "Clouds", + "description": "few clouds", + "icon": "02n" + } + ], + "base": "stations", + "main": { + "temp": 12.54, + "feels_like": 11.86, + "temp_min": 10.49, + "temp_max": 14.27, + "pressure": 1024, + "humidity": 77 + }, + "visibility": 10000, + "wind": { + "speed": 2.11, + "deg": 254, + "gust": 4.63 + }, + "clouds": { + "all": 21 + }, + "dt": 1633545358, + "sys": { + "type": 2, + "id": 2019646, + "country": "GB", + "sunrise": 1633500560, + "sunset": 1633541256 + }, + "timezone": 3600, + "id": 2643743, + "name": "London", + "cod": 200 +} diff --git a/plugins/parsers/json_v2/testdata/mix_field_and_object/telegraf.conf 
b/plugins/parsers/json_v2/testdata/mix_field_and_object/telegraf.conf new file mode 100644 index 0000000000000..cc181960cbf1e --- /dev/null +++ b/plugins/parsers/json_v2/testdata/mix_field_and_object/telegraf.conf @@ -0,0 +1,15 @@ +[[inputs.file]] + files = ["./testdata/mix_field_and_object/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + measurement_name = "openweather" + [[inputs.file.json_v2.field]] + path = "weather.#.main" + rename = "summary" + [[inputs.file.json_v2.field]] + path = "weather.#.description" + [[inputs.file.json_v2.object]] + path = "@this" + included_keys = ["coord_lat", "coord_lon", "main_temp", "wind_speed"] # List of JSON keys (for a nested key, prepend the parent keys with underscores) that should be only included in result + tags = ["id", "name"] # List of JSON keys (for a nested key, prepend the parent keys with underscores) to be a tag instead of a field + From fde637464add3b2958560338cc3a29e0164021ba Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Thu, 7 Oct 2021 15:47:56 -0500 Subject: [PATCH 120/176] fix: migrate aws/credentials.go to use NewSession, same functionality but now supports error (#9878) --- config/aws/credentials.go | 15 +++++---- plugins/inputs/cloudwatch/cloudwatch.go | 6 +++- .../kinesis_consumer/kinesis_consumer.go | 31 ++++++++++++------- plugins/outputs/cloudwatch/cloudwatch.go | 18 ++++++----- .../cloudwatch_logs/cloudwatch_logs.go | 12 ++++--- plugins/outputs/kinesis/kinesis.go | 8 +++-- plugins/outputs/timestream/timestream.go | 28 ++++++++++------- plugins/outputs/timestream/timestream_test.go | 16 +++++----- 8 files changed, 83 insertions(+), 51 deletions(-) diff --git a/config/aws/credentials.go b/config/aws/credentials.go index d2c2b284817d8..7b75917393590 100644 --- a/config/aws/credentials.go +++ b/config/aws/credentials.go @@ -21,7 +21,7 @@ type CredentialConfig struct { WebIdentityTokenFile string `toml:"web_identity_token_file"` } -func 
(c *CredentialConfig) Credentials() client.ConfigProvider { +func (c *CredentialConfig) Credentials() (client.ConfigProvider, error) { if c.RoleARN != "" { return c.assumeCredentials() } @@ -29,7 +29,7 @@ func (c *CredentialConfig) Credentials() client.ConfigProvider { return c.rootCredentials() } -func (c *CredentialConfig) rootCredentials() client.ConfigProvider { +func (c *CredentialConfig) rootCredentials() (client.ConfigProvider, error) { config := &aws.Config{ Region: aws.String(c.Region), } @@ -42,11 +42,14 @@ func (c *CredentialConfig) rootCredentials() client.ConfigProvider { config.Credentials = credentials.NewSharedCredentials(c.Filename, c.Profile) } - return session.New(config) + return session.NewSession(config) } -func (c *CredentialConfig) assumeCredentials() client.ConfigProvider { - rootCredentials := c.rootCredentials() +func (c *CredentialConfig) assumeCredentials() (client.ConfigProvider, error) { + rootCredentials, err := c.rootCredentials() + if err != nil { + return nil, err + } config := &aws.Config{ Region: aws.String(c.Region), Endpoint: &c.EndpointURL, @@ -58,5 +61,5 @@ func (c *CredentialConfig) assumeCredentials() client.ConfigProvider { config.Credentials = stscreds.NewCredentials(rootCredentials, c.RoleARN) } - return session.New(config) + return session.NewSession(config) } diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go index 7dbd3c7faa7be..3fb86310946e1 100644 --- a/plugins/inputs/cloudwatch/cloudwatch.go +++ b/plugins/inputs/cloudwatch/cloudwatch.go @@ -288,7 +288,11 @@ func (c *CloudWatch) initializeCloudWatch() error { } loglevel := aws.LogOff - c.client = cwClient.New(c.CredentialConfig.Credentials(), cfg.WithLogLevel(loglevel)) + p, err := c.CredentialConfig.Credentials() + if err != nil { + return err + } + c.client = cwClient.New(p, cfg.WithLogLevel(loglevel)) // Initialize regex matchers for each Dimension value. 
for _, m := range c.Metrics { diff --git a/plugins/inputs/kinesis_consumer/kinesis_consumer.go b/plugins/inputs/kinesis_consumer/kinesis_consumer.go index 005ccdc43aab2..88b5fef660112 100644 --- a/plugins/inputs/kinesis_consumer/kinesis_consumer.go +++ b/plugins/inputs/kinesis_consumer/kinesis_consumer.go @@ -153,24 +153,31 @@ func (k *KinesisConsumer) SetParser(parser parsers.Parser) { } func (k *KinesisConsumer) connect(ac telegraf.Accumulator) error { - client := kinesis.New(k.CredentialConfig.Credentials()) + p, err := k.CredentialConfig.Credentials() + if err != nil { + return err + } + client := kinesis.New(p) k.checkpoint = &noopCheckpoint{} if k.DynamoDB != nil { - var err error + p, err := (&internalaws.CredentialConfig{ + Region: k.Region, + AccessKey: k.AccessKey, + SecretKey: k.SecretKey, + RoleARN: k.RoleARN, + Profile: k.Profile, + Filename: k.Filename, + Token: k.Token, + EndpointURL: k.EndpointURL, + }).Credentials() + if err != nil { + return err + } k.checkpoint, err = ddb.New( k.DynamoDB.AppName, k.DynamoDB.TableName, - ddb.WithDynamoClient(dynamodb.New((&internalaws.CredentialConfig{ - Region: k.Region, - AccessKey: k.AccessKey, - SecretKey: k.SecretKey, - RoleARN: k.RoleARN, - Profile: k.Profile, - Filename: k.Filename, - Token: k.Token, - EndpointURL: k.EndpointURL, - }).Credentials())), + ddb.WithDynamoClient(dynamodb.New(p)), ddb.WithMaxInterval(time.Second*10), ) if err != nil { diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go index ddf3622328ba2..85f9570b3d5ea 100644 --- a/plugins/outputs/cloudwatch/cloudwatch.go +++ b/plugins/outputs/cloudwatch/cloudwatch.go @@ -177,12 +177,12 @@ var sampleConfig = ` ## Namespace for the CloudWatch MetricDatums namespace = "InfluxData/Telegraf" - ## If you have a large amount of metrics, you should consider to send statistic - ## values instead of raw metrics which could not only improve performance but - ## also save AWS API cost. 
If enable this flag, this plugin would parse the required - ## CloudWatch statistic fields (count, min, max, and sum) and send them to CloudWatch. - ## You could use basicstats aggregator to calculate those fields. If not all statistic - ## fields are available, all fields would still be sent as raw metrics. + ## If you have a large amount of metrics, you should consider to send statistic + ## values instead of raw metrics which could not only improve performance but + ## also save AWS API cost. If enable this flag, this plugin would parse the required + ## CloudWatch statistic fields (count, min, max, and sum) and send them to CloudWatch. + ## You could use basicstats aggregator to calculate those fields. If not all statistic + ## fields are available, all fields would still be sent as raw metrics. # write_statistics = false ## Enable high resolution metrics of 1 second (if not enabled, standard resolution are of 60 seconds precision) @@ -198,7 +198,11 @@ func (c *CloudWatch) Description() string { } func (c *CloudWatch) Connect() error { - c.svc = cloudwatch.New(c.CredentialConfig.Credentials()) + p, err := c.CredentialConfig.Credentials() + if err != nil { + return err + } + c.svc = cloudwatch.New(p) return nil } diff --git a/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go b/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go index 79eb5d7722f13..f9ef289089363 100644 --- a/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go +++ b/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go @@ -108,12 +108,12 @@ region = "us-east-1" ## Cloud watch log group. Must be created in AWS cloudwatch logs upfront! ## For example, you can specify the name of the k8s cluster here to group logs from all cluster in oine place -log_group = "my-group-name" +log_group = "my-group-name" ## Log stream in log group ## Either log group name or reference to metric attribute, from which it can be parsed: ## tag: or field:. If log stream is not exist, it will be created. 
-## Since AWS is not automatically delete logs streams with expired logs entries (i.e. empty log stream) +## Since AWS is not automatically delete logs streams with expired logs entries (i.e. empty log stream) ## you need to put in place appropriate house-keeping (https://forums.aws.amazon.com/thread.jspa?threadID=178855) log_stream = "tag:location" @@ -126,7 +126,7 @@ log_data_metric_name = "docker_log" ## Specify from which metric attribute the log data should be retrieved: ## tag: or field:. ## I.e., if you are using docker_log plugin to stream logs from container, then -## specify log_data_source = "field:message" +## specify log_data_source = "field:message" log_data_source = "field:message" ` @@ -187,7 +187,11 @@ func (c *CloudWatchLogs) Connect() error { var logGroupsOutput = &cloudwatchlogs.DescribeLogGroupsOutput{NextToken: &dummyToken} var err error - c.svc = cloudwatchlogs.New(c.CredentialConfig.Credentials()) + p, err := c.CredentialConfig.Credentials() + if err != nil { + return err + } + c.svc = cloudwatchlogs.New(p) if c.svc == nil { return fmt.Errorf("can't create cloudwatch logs service endpoint") } diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go index 2e75788400ae0..412e3d9742b72 100644 --- a/plugins/outputs/kinesis/kinesis.go +++ b/plugins/outputs/kinesis/kinesis.go @@ -126,9 +126,13 @@ func (k *KinesisOutput) Connect() error { k.Log.Infof("Establishing a connection to Kinesis in %s", k.Region) } - svc := kinesis.New(k.CredentialConfig.Credentials()) + p, err := k.CredentialConfig.Credentials() + if err != nil { + return err + } + svc := kinesis.New(p) - _, err := svc.DescribeStreamSummary(&kinesis.DescribeStreamSummaryInput{ + _, err = svc.DescribeStreamSummary(&kinesis.DescribeStreamSummaryInput{ StreamName: aws.String(k.StreamName), }) k.svc = svc diff --git a/plugins/outputs/timestream/timestream.go b/plugins/outputs/timestream/timestream.go index 2c77c408e7357..42ea706afa9e6 100644 --- 
a/plugins/outputs/timestream/timestream.go +++ b/plugins/outputs/timestream/timestream.go @@ -57,7 +57,7 @@ const MaxRecordsPerCall = 100 var sampleConfig = ` ## Amazon Region region = "us-east-1" - + ## Amazon Credentials ## Credentials are loaded in the following order: ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified @@ -75,7 +75,7 @@ var sampleConfig = ` #role_session_name = "" #profile = "" #shared_credential_file = "" - + ## Endpoint to make request against, the correct endpoint is automatically ## determined and this option should only be set if you wish to override the ## default. @@ -88,7 +88,7 @@ var sampleConfig = ` ## Specifies if the plugin should describe the Timestream database upon starting ## to validate if it has access necessary permissions, connection, etc., as a safety check. - ## If the describe operation fails, the plugin will not start + ## If the describe operation fails, the plugin will not start ## and therefore the Telegraf agent will not start. describe_database_on_start = false @@ -97,17 +97,17 @@ var sampleConfig = ` ## For example, consider the following data in line protocol format: ## weather,location=us-midwest,season=summer temperature=82,humidity=71 1465839830100400200 ## airquality,location=us-west no2=5,pm25=16 1465839830100400200 - ## where weather and airquality are the measurement names, location and season are tags, + ## where weather and airquality are the measurement names, location and season are tags, ## and temperature, humidity, no2, pm25 are fields. 
## In multi-table mode: ## - first line will be ingested to table named weather ## - second line will be ingested to table named airquality ## - the tags will be represented as dimensions ## - first table (weather) will have two records: - ## one with measurement name equals to temperature, + ## one with measurement name equals to temperature, ## another with measurement name equals to humidity ## - second table (airquality) will have two records: - ## one with measurement name equals to no2, + ## one with measurement name equals to no2, ## another with measurement name equals to pm25 ## - the Timestream tables from the example will look like this: ## TABLE "weather": @@ -141,7 +141,7 @@ var sampleConfig = ` ## Specifies the Timestream table where the metrics will be uploaded. # single_table_name = "yourTableNameHere" - ## Only valid and required for mapping_mode = "single-table" + ## Only valid and required for mapping_mode = "single-table" ## Describes what will be the Timestream dimension name for the Telegraf ## measurement name. # single_table_dimension_name_for_telegraf_measurement_name = "namespace" @@ -169,9 +169,12 @@ var sampleConfig = ` ` // WriteFactory function provides a way to mock the client instantiation for testing purposes. 
-var WriteFactory = func(credentialConfig *internalaws.CredentialConfig) WriteClient { - configProvider := credentialConfig.Credentials() - return timestreamwrite.New(configProvider) +var WriteFactory = func(credentialConfig *internalaws.CredentialConfig) (WriteClient, error) { + configProvider, err := credentialConfig.Credentials() + if err != nil { + return nil, err + } + return timestreamwrite.New(configProvider), nil } func (t *Timestream) Connect() error { @@ -221,7 +224,10 @@ func (t *Timestream) Connect() error { t.Log.Infof("Constructing Timestream client for '%s' mode", t.MappingMode) - svc := WriteFactory(&t.CredentialConfig) + svc, err := WriteFactory(&t.CredentialConfig) + if err != nil { + return err + } if t.DescribeDatabaseOnStart { t.Log.Infof("Describing database '%s' in region '%s'", t.DatabaseName, t.Region) diff --git a/plugins/outputs/timestream/timestream_test.go b/plugins/outputs/timestream/timestream_test.go index 67cdb4495c1d8..868e24d745a9c 100644 --- a/plugins/outputs/timestream/timestream_test.go +++ b/plugins/outputs/timestream/timestream_test.go @@ -2,7 +2,6 @@ package timestream_test import ( "fmt" - "github.com/aws/aws-sdk-go/aws/awserr" "reflect" "sort" "strconv" @@ -10,6 +9,8 @@ import ( "testing" "time" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/timestreamwrite" "github.com/influxdata/telegraf" @@ -53,10 +54,9 @@ func (m *mockTimestreamClient) DescribeDatabase(*timestreamwrite.DescribeDatabas func TestConnectValidatesConfigParameters(t *testing.T) { assertions := assert.New(t) - ts.WriteFactory = func(credentialConfig *internalaws.CredentialConfig) ts.WriteClient { - return &mockTimestreamClient{} + ts.WriteFactory = func(credentialConfig *internalaws.CredentialConfig) (ts.WriteClient, error) { + return &mockTimestreamClient{}, nil } - // checking base arguments noDatabaseName := ts.Timestream{Log: testutil.Logger{}} 
assertions.Contains(noDatabaseName.Connect().Error(), "DatabaseName") @@ -182,11 +182,11 @@ func (m *mockTimestreamErrorClient) DescribeDatabase(*timestreamwrite.DescribeDa func TestThrottlingErrorIsReturnedToTelegraf(t *testing.T) { assertions := assert.New(t) - ts.WriteFactory = func(credentialConfig *internalaws.CredentialConfig) ts.WriteClient { + ts.WriteFactory = func(credentialConfig *internalaws.CredentialConfig) (ts.WriteClient, error) { return &mockTimestreamErrorClient{ awserr.New(timestreamwrite.ErrCodeThrottlingException, "Throttling Test", nil), - } + }, nil } plugin := ts.Timestream{ MappingMode: ts.MappingModeMultiTable, @@ -210,11 +210,11 @@ func TestThrottlingErrorIsReturnedToTelegraf(t *testing.T) { func TestRejectedRecordsErrorResultsInMetricsBeingSkipped(t *testing.T) { assertions := assert.New(t) - ts.WriteFactory = func(credentialConfig *internalaws.CredentialConfig) ts.WriteClient { + ts.WriteFactory = func(credentialConfig *internalaws.CredentialConfig) (ts.WriteClient, error) { return &mockTimestreamErrorClient{ awserr.New(timestreamwrite.ErrCodeRejectedRecordsException, "RejectedRecords Test", nil), - } + }, nil } plugin := ts.Timestream{ MappingMode: ts.MappingModeMultiTable, From 276bbc900461e1f33569952f7e737631714ca05b Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Thu, 7 Oct 2021 15:24:09 -0600 Subject: [PATCH 121/176] Update changelog (cherry picked from commit 3eab8d846e2337de731db51cbd36bb8586a59bd1) --- CHANGELOG.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7360ff5a687d3..8760b914b7f95 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,13 @@ +## v1.20.2 [2021-10-07] + +#### Bugfixes + + - [#9878](https://github.com/influxdata/telegraf/pull/9878) `inputs.cloudwatch` Use new session API + - [#9872](https://github.com/influxdata/telegraf/pull/9872) `parsers.json_v2` Duplicate line_protocol when using object and fields + - 
[#9787](https://github.com/influxdata/telegraf/pull/9787) `parsers.influx` Fix memory leak in influx parser + - [#9880](https://github.com/influxdata/telegraf/pull/9880) `inputs.stackdriver` Migrate to cloud.google.com/go/monitoring/apiv3/v2 + - [#9887](https://github.com/influxdata/telegraf/pull/9887) Fix makefile typo that prevented i386 tar and rpm packages from being built + ## v1.20.1 [2021-10-06] #### Bugfixes From 60211f0f1a800b2e3cad0f4ecf8823781b4dcfa3 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Mon, 11 Oct 2021 08:34:30 -0600 Subject: [PATCH 122/176] Fix/jaeger version (#9893) --- docs/LICENSE_OF_DEPENDENCIES.md | 11 +- go.mod | 80 ++-- go.sum | 645 +++++++++++++++++++++++++------- 3 files changed, 557 insertions(+), 179 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index b36594a1faea2..34a54d2d2fde9 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -83,7 +83,6 @@ following works: - github.com/go-stack/stack [MIT License](https://github.com/go-stack/stack/blob/master/LICENSE.md) - github.com/gobwas/glob [MIT License](https://github.com/gobwas/glob/blob/master/LICENSE) - github.com/gofrs/uuid [MIT License](https://github.com/gofrs/uuid/blob/master/LICENSE) -- github.com/gogo/googleapis [Apache License 2.0](https://github.com/gogo/googleapis/blob/master/LICENSE) - github.com/gogo/protobuf [BSD 3-Clause Clear License](https://github.com/gogo/protobuf/blob/master/LICENSE) - github.com/golang-jwt/jwt [MIT License](https://github.com/golang-jwt/jwt/blob/main/LICENSE) - github.com/golang-sql/civil [Apache License 2.0](https://github.com/golang-sql/civil/blob/master/LICENSE) @@ -105,7 +104,6 @@ following works: - github.com/gosnmp/gosnmp [BSD 2-Clause "Simplified" License](https://github.com/gosnmp/gosnmp/blob/master/LICENSE) - github.com/grid-x/modbus [BSD 3-Clause "New" or "Revised" License](https://github.com/grid-x/modbus/blob/master/LICENSE) - github.com/grid-x/serial 
[MIT License](https://github.com/grid-x/serial/blob/master/LICENSE) -- github.com/grpc-ecosystem/grpc-gateway [BSD 3-Clause "New" or "Revised" License](https://github.com/grpc-ecosystem/grpc-gateway/blob/master/LICENSE.txt) - github.com/hailocab/go-hostpool [MIT License](https://github.com/hailocab/go-hostpool/blob/master/LICENSE) - github.com/harlow/kinesis-consumer [MIT License](https://github.com/harlow/kinesis-consumer/blob/master/MIT-LICENSE) - github.com/hashicorp/consul/api [Mozilla Public License 2.0](https://github.com/hashicorp/consul/blob/master/LICENSE) @@ -133,9 +131,14 @@ following works: - github.com/jackc/pgx [MIT License](https://github.com/jackc/pgx/blob/master/LICENSE) - github.com/jaegertracing/jaeger [Apache License 2.0](https://github.com/jaegertracing/jaeger/blob/master/LICENSE) - github.com/james4k/rcon [MIT License](https://github.com/james4k/rcon/blob/master/LICENSE) +- github.com/jcmturner/aescts [Apache License 2.0](https://github.com/jcmturner/aescts/blob/master/LICENSE) +- github.com/jcmturner/dnsutils [Apache License 2.0](https://github.com/jcmturner/dnsutils/blob/master/LICENSE) - github.com/jcmturner/gofork [BSD 3-Clause "New" or "Revised" License](https://github.com/jcmturner/gofork/blob/master/LICENSE) +- github.com/jcmturner/gokrb5 [Apache License 2.0](https://github.com/jcmturner/gokrb5/blob/master/LICENSE) +- github.com/jcmturner/rpc [Apache License 2.0](https://github.com/jcmturner/rpc/blob/master/LICENSE) - github.com/jhump/protoreflect [Apache License 2.0](https://github.com/jhump/protoreflect/blob/master/LICENSE) - github.com/jmespath/go-jmespath [Apache License 2.0](https://github.com/jmespath/go-jmespath/blob/master/LICENSE) +- github.com/josharian/intern [MIT License](https://github.com/josharian/intern/blob/master/license.md) - github.com/jpillora/backoff [MIT License](https://github.com/jpillora/backoff/blob/master/LICENSE) - github.com/json-iterator/go [MIT 
License](https://github.com/json-iterator/go/blob/master/LICENSE) - github.com/kardianos/service [zlib License](https://github.com/kardianos/service/blob/master/LICENSE) @@ -253,10 +256,6 @@ following works: - gopkg.in/gorethink/gorethink.v3 [Apache License 2.0](https://github.com/rethinkdb/rethinkdb-go/blob/v3.0.5/LICENSE) - gopkg.in/inf.v0 [BSD 3-Clause "New" or "Revised" License](https://github.com/go-inf/inf/blob/v0.9.1/LICENSE) - gopkg.in/ini.v1 [Apache License 2.0](https://github.com/go-ini/ini/blob/master/LICENSE) -- gopkg.in/jcmturner/aescts.v1 [Apache License 2.0](https://github.com/jcmturner/aescts/blob/v1.0.1/LICENSE) -- gopkg.in/jcmturner/dnsutils.v1 [Apache License 2.0](https://github.com/jcmturner/dnsutils/blob/v1.0.1/LICENSE) -- gopkg.in/jcmturner/gokrb5.v7 [Apache License 2.0](https://github.com/jcmturner/gokrb5/tree/v7.5.0/LICENSE) -- gopkg.in/jcmturner/rpc.v1 [Apache License 2.0](https://github.com/jcmturner/rpc/blob/v1.1.0/LICENSE) - gopkg.in/ldap.v3 [MIT License](https://github.com/go-ldap/ldap/blob/v3.1.7/LICENSE) - gopkg.in/olivere/elastic.v5 [MIT License](https://github.com/olivere/elastic/blob/v5.0.76/LICENSE) - gopkg.in/tomb.v1 [BSD 3-Clause Clear License](https://github.com/go-tomb/tomb/blob/v1/LICENSE) diff --git a/go.mod b/go.mod index c6f3138489d28..bcb5f9af3a7af 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/Azure/azure-event-hubs-go/v3 v3.3.13 github.com/Azure/azure-kusto-go v0.3.2 github.com/Azure/azure-pipeline-go v0.2.3 // indirect - github.com/Azure/azure-sdk-for-go v51.1.0+incompatible // indirect + github.com/Azure/azure-sdk-for-go v52.5.0+incompatible // indirect github.com/Azure/azure-storage-blob-go v0.13.0 // indirect github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd github.com/Azure/go-amqp v0.13.12 // indirect @@ -32,8 +32,8 @@ require ( github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee github.com/Microsoft/go-winio v0.4.17 // indirect 
github.com/Microsoft/hcsshim v0.8.21 // indirect - github.com/Shopify/sarama v1.27.2 - github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46 // indirect + github.com/Shopify/sarama v1.29.1 + github.com/StackExchange/wmi v1.2.1 // indirect github.com/aerospike/aerospike-client-go v1.27.0 github.com/alecthomas/participle v0.4.1 // indirect github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15 @@ -92,29 +92,28 @@ require ( github.com/eapache/queue v1.1.0 // indirect github.com/echlebek/timeproxy v1.0.0 // indirect github.com/eclipse/paho.mqtt.golang v1.3.0 - github.com/fatih/color v1.9.0 // indirect + github.com/fatih/color v1.10.0 // indirect github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 github.com/go-logfmt/logfmt v0.5.0 github.com/go-logr/logr v0.4.0 // indirect - github.com/go-ole/go-ole v1.2.4 // indirect + github.com/go-ole/go-ole v1.2.5 // indirect github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c github.com/go-redis/redis v6.15.9+incompatible github.com/go-sql-driver/mysql v1.6.0 - github.com/go-stack/stack v1.8.0 // indirect + github.com/go-stack/stack v1.8.1 // indirect github.com/goburrow/modbus v0.1.0 // indirect github.com/goburrow/serial v0.1.0 // indirect github.com/gobwas/glob v0.2.3 github.com/gofrs/uuid v3.3.0+incompatible - github.com/gogo/googleapis v1.4.0 // indirect github.com/gogo/protobuf v1.3.2 github.com/golang-jwt/jwt/v4 v4.0.0 github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe // indirect github.com/golang/geo v0.0.0-20190916061304-5b978397cfec - github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 github.com/golang/snappy v0.0.3 - github.com/google/flatbuffers v1.11.0 // indirect + github.com/google/flatbuffers v1.12.0 // indirect github.com/google/go-cmp v0.5.6 
github.com/google/go-github/v32 v32.1.0 github.com/google/go-querystring v1.0.0 // indirect @@ -123,7 +122,7 @@ require ( github.com/googleapis/gax-go/v2 v2.0.5 // indirect github.com/googleapis/gnostic v0.5.5 // indirect github.com/gopcua/opcua v0.2.0-rc2.0.20210409063412-baabb9b14fd2 - github.com/gorilla/mux v1.7.3 + github.com/gorilla/mux v1.8.0 github.com/gorilla/websocket v1.4.2 github.com/gosnmp/gosnmp v1.32.0 github.com/grid-x/modbus v0.0.0-20210224155242-c4a3d042e99b @@ -133,7 +132,7 @@ require ( github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 github.com/hashicorp/consul/api v1.11.0 github.com/hashicorp/go-cleanhttp v0.5.1 // indirect - github.com/hashicorp/go-hclog v0.12.2 // indirect + github.com/hashicorp/go-hclog v0.16.2 // indirect github.com/hashicorp/go-immutable-radix v1.2.0 // indirect github.com/hashicorp/go-msgpack v0.5.5 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect @@ -155,7 +154,7 @@ require ( github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8 // indirect github.com/jackc/pgtype v1.3.0 // indirect github.com/jackc/pgx/v4 v4.6.0 - github.com/jaegertracing/jaeger v1.15.1 // indirect + github.com/jaegertracing/jaeger v1.26.0 // indirect github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a github.com/jcmturner/gofork v1.0.0 // indirect github.com/jhump/protoreflect v1.8.3-0.20210616212123-6cc1efa697ca @@ -165,14 +164,13 @@ require ( github.com/kardianos/service v1.0.0 github.com/karrick/godirwalk v1.16.1 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 - github.com/klauspost/compress v1.13.1 // indirect - github.com/kr/pretty v0.2.1 // indirect + github.com/klauspost/compress v1.13.4 // indirect + github.com/kr/pretty v0.3.0 // indirect github.com/kr/text v0.2.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 // indirect - github.com/lib/pq v1.3.0 // indirect - 
github.com/mailru/easyjson v0.7.1 // indirect - github.com/mattn/go-colorable v0.1.6 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.8 // indirect github.com/mattn/go-ieproxy v0.0.1 // indirect github.com/mattn/go-isatty v0.0.12 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 @@ -183,7 +181,7 @@ require ( github.com/miekg/dns v1.1.43 github.com/minio/highwayhash v1.0.1 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mitchellh/mapstructure v1.2.2 // indirect + github.com/mitchellh/mapstructure v1.4.1 // indirect github.com/moby/ipvs v1.0.1 github.com/moby/sys/mount v0.2.0 // indirect github.com/moby/sys/mountinfo v0.4.1 // indirect @@ -209,7 +207,7 @@ require ( github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5 github.com/openzipkin/zipkin-go v0.2.5 github.com/philhofer/fwd v1.1.1 // indirect - github.com/pierrec/lz4 v2.5.2+incompatible // indirect + github.com/pierrec/lz4 v2.6.0+incompatible // indirect github.com/pion/dtls/v2 v2.0.9 github.com/pion/logging v0.2.2 // indirect github.com/pion/transport v0.12.3 // indirect @@ -219,10 +217,10 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_golang v1.11.0 github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.26.0 + github.com/prometheus/common v0.30.0 github.com/prometheus/procfs v0.6.0 - github.com/prometheus/prometheus v1.8.2-0.20200911110723-e83ef207b6c2 - github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect + github.com/prometheus/prometheus v1.8.2-0.20210430082741-2a4b8e12bbf2 + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect github.com/riemann/riemann-go-client v0.5.0 github.com/robertkrimen/otto v0.0.0-20191219234010-c382bd3c16ff // indirect @@ -236,8 +234,8 @@ require ( 
github.com/showwin/speedtest-go v1.1.4 github.com/signalfx/com_signalfx_metrics_protobuf v0.0.2 // indirect github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083 // indirect - github.com/signalfx/golib/v3 v3.3.34 - github.com/signalfx/sapm-proto v0.4.0 // indirect + github.com/signalfx/golib/v3 v3.3.38 + github.com/signalfx/sapm-proto v0.7.2 // indirect github.com/sirupsen/logrus v1.8.1 github.com/sleepinggenius2/gosmi v0.4.3 github.com/snowflakedb/gosnowflake v1.5.0 @@ -250,8 +248,8 @@ require ( github.com/tidwall/match v1.0.3 // indirect github.com/tidwall/pretty v1.1.0 // indirect github.com/tinylib/msgp v1.1.6 - github.com/tklauser/go-sysconf v0.3.5 // indirect - github.com/tklauser/numcpus v0.2.2 // indirect + github.com/tklauser/go-sysconf v0.3.9 // indirect + github.com/tklauser/numcpus v0.3.0 // indirect github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330 github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 // indirect github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae // indirect @@ -263,8 +261,8 @@ require ( github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/scram v1.0.2 // indirect github.com/xdg-go/stringprep v1.0.2 // indirect - github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c - github.com/xdg/stringprep v1.0.0 // indirect + github.com/xdg/scram v1.0.3 + github.com/xdg/stringprep v1.0.3 // indirect github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 // indirect go.etcd.io/etcd/api/v3 v3.5.0 // indirect @@ -275,15 +273,15 @@ require ( go.opentelemetry.io/otel/metric v0.23.0 go.opentelemetry.io/otel/sdk/metric v0.23.0 go.starlark.net v0.0.0-20210406145628-7a1108eaa012 - go.uber.org/atomic v1.7.0 // indirect + go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.6.0 // indirect golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e // indirect golang.org/x/mod v0.4.2 // indirect - 
golang.org/x/net v0.0.0-20210610132358-84b48f89b13b + golang.org/x/net v0.0.0-20210614182718-04defd469f4e golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069 - golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect + golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71 + golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d // indirect golang.org/x/text v0.3.6 golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect golang.org/x/tools v0.1.5 @@ -301,11 +299,7 @@ require ( gopkg.in/fsnotify.v1 v1.4.7 // indirect gopkg.in/gorethink/gorethink.v3 v3.0.5 gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/ini.v1 v1.42.0 // indirect - gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect - gopkg.in/jcmturner/dnsutils.v1 v1.0.1 // indirect - gopkg.in/jcmturner/gokrb5.v7 v7.5.0 // indirect - gopkg.in/jcmturner/rpc.v1 v1.1.0 // indirect + gopkg.in/ini.v1 v1.62.0 // indirect gopkg.in/ldap.v3 v3.1.0 gopkg.in/olivere/elastic.v5 v5.0.70 gopkg.in/sourcemap.v1 v1.0.5 // indirect @@ -314,11 +308,11 @@ require ( gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect gotest.tools v2.2.0+incompatible - k8s.io/api v0.20.6 + k8s.io/api v0.22.2 k8s.io/apimachinery v0.22.2 - k8s.io/client-go v0.20.6 + k8s.io/client-go v0.22.2 k8s.io/klog/v2 v2.9.0 // indirect - k8s.io/utils v0.0.0-20201110183641-67b214c5f920 // indirect + k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a // indirect modernc.org/cc/v3 v3.33.5 // indirect modernc.org/ccgo/v3 v3.9.4 // indirect modernc.org/libc v1.9.5 // indirect @@ -334,6 +328,12 @@ require ( require ( github.com/cenkalti/backoff/v4 v4.1.1 // indirect + github.com/jcmturner/aescts/v2 v2.0.0 // indirect + github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect + github.com/jcmturner/gokrb5/v8 v8.4.2 // indirect + github.com/jcmturner/rpc/v2 v2.0.3 // indirect + github.com/josharian/intern v1.0.0 // indirect + 
github.com/rogpeppe/go-internal v1.6.2 // indirect go.opentelemetry.io/otel v1.0.0-RC3 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.23.0 // indirect go.opentelemetry.io/otel/internal/metric v0.23.0 // indirect diff --git a/go.sum b/go.sum index a8fc62a7b3874..de38213b94a7b 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,4 @@ +4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= @@ -38,6 +39,7 @@ cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM7 cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/kms v0.1.0 h1:VXAb5OzejDcyhFzIDeZ5n5AUdlsFnCyexuascIwWMj0= cloud.google.com/go/kms v0.1.0/go.mod h1:8Qp8PCAypHg4FdmlyW1QRAv09BGQ9Uzh7JnmIZxPk+c= cloud.google.com/go/monitoring v0.2.0 h1:UFQB1+YbZjAOqAFFY4RlNiOrt19O5HzPeCdtYSlPvmk= @@ -60,7 +62,9 @@ code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYB collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= collectd.org v0.5.0 h1:y4uFSAuOmeVhG3GCRa3/oH+ysePfO/+eGJNfd0Qa3d8= collectd.org v0.5.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= +contrib.go.opencensus.io/exporter/prometheus v0.3.0/go.mod h1:rpCPVQKhiyH8oomWgm34ZmgIdZa8OVYO5WAIygPbBBE= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= 
+github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/Azure/azure-amqp-common-go/v3 v3.0.1 h1:mXh+eyOxGLBfqDtfmbtby0l7XfG/6b2NkuZ3B7i6zHA= github.com/Azure/azure-amqp-common-go/v3 v3.0.1/go.mod h1:PBIGdzcO1teYoufTKMcGibdKaYZv4avS+O6LNIp8bq0= github.com/Azure/azure-event-hubs-go/v3 v3.3.13 h1:aiI2RLjp0MzLCuFUXzR8b3h3bdPIc2c3vBYXRK8jX3E= @@ -74,9 +78,9 @@ github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVt github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v44.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v45.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v51.1.0+incompatible h1:7uk6GWtUqKg6weLv2dbKnzwb0ml1Qn70AdtRccZ543w= github.com/Azure/azure-sdk-for-go v51.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v52.5.0+incompatible h1:/NLBWHCnIHtZyLPc1P7WIqi4Te4CC23kIQyK3Ep/7lA= +github.com/Azure/azure-sdk-for-go v52.5.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y= github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= github.com/Azure/azure-storage-blob-go v0.13.0 h1:lgWHvFh+UYBNVQLFHXkvul2f6yOPA9PIH82RTG2cSwc= @@ -96,7 +100,7 @@ github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8 github.com/Azure/go-autorest/autorest v0.10.0/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= 
github.com/Azure/go-autorest/autorest v0.11.3/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= -github.com/Azure/go-autorest/autorest v0.11.4/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest v0.11.17/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest v0.11.18 h1:90Y4srNYrwOtAgVo3ndrQkTYn6kf1Eg/AjTFJ8Is2aM= github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= @@ -127,10 +131,8 @@ github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= -github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= @@ -145,7 +147,13 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock 
v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/DataDog/zstd v1.4.4/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= +github.com/HdrHistogram/hdrhistogram-go v0.9.0/go.mod h1:nxrse8/Tzg2tg3DZcZjm6qEclQKK70g0KxO61gFFZD4= +github.com/HdrHistogram/hdrhistogram-go v1.0.1/go.mod h1:BWJ+nMSHY3L41Zj7CA3uXnloDp7xxV0YvstAE7nKTaM= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee h1:atI/FFjXh6hIVlPE1Jup9m8N4B9q/OSbMUe2EBahs+w= github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee/go.mod h1:jDA6v0TUYrFEIAE5uGJ29LQOeONIgMdP4Rkqb8HUnPM= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= @@ -185,20 +193,22 @@ github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb0 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell 
v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/sarama v1.27.2 h1:1EyY1dsxNDUQEv0O/4TsjosHI2CgB1uo9H/v56xzTxc= -github.com/Shopify/sarama v1.27.2/go.mod h1:g5s5osgELxgM+Md9Qni9rzo7Rbt+vvFQI4bt/Mc93II= +github.com/Shopify/sarama v1.22.2-0.20190604114437-cd910a683f9f/go.mod h1:XLH1GYJnLVE0XCr6KdJGVJRTwY30moWNJ4sERjXX6fs= +github.com/Shopify/sarama v1.29.0/go.mod h1:2QpgD79wpdAESqNQMxNc0KYMkycd4slxGdV3TWSVqrU= +github.com/Shopify/sarama v1.29.1 h1:wBAacXbYVLmWieEA/0X/JagDdCZ8NVFOfS6l6+2u5S0= +github.com/Shopify/sarama v1.29.1/go.mod h1:mdtqvCSg8JOxk8PmpTNGyo6wzd4BMm4QXSfDnTXmgkE= github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= -github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46 h1:5sXbqlSomvdjlRbWyNqkPsJ3Fg+tQZCbgeX1VGljbQY= github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= +github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/a8m/tree v0.0.0-20210115125333-10a5fd5b637d/go.mod 
h1:FSdwKX97koS5efgm8WevNf7XS3PqtyFkKDDXrz778cg= github.com/aerospike/aerospike-client-go v1.27.0 h1:VC6/Wqqm3Qlp4/utM7Zts3cv4A2HPn8rVFp/XZKTWgE= @@ -220,11 +230,13 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15 h1:AUNCr9CiJuwrRYS3XieqF+Z9B9gNxo/eANAJCF2eiN4= github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= +github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= github.com/aliyun/alibaba-cloud-sdk-go v1.61.1004 h1:YtaYjXmemIMyySUbs0VGFPqsLpsNHf4TW/L6yqpJQ9s= github.com/aliyun/alibaba-cloud-sdk-go v1.61.1004/go.mod h1:pUKYbK5JQ+1Dfxk80P0qxGqe5dkxDoabbZS7zOcouyA= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 h1:FXrPTd8Rdlc94dKccl7KPmdmIbVh/OjelJ8/vgMRzcQ= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RDr2QLWRmLH1R1ZA4RInpmvOzDDXtaIZkc= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= github.com/antchfx/jsonquery v1.1.4 h1:+OlFO3QS9wjU0MKx9MgHm5f6o6hdd4e9mUTp0wTjxlM= github.com/antchfx/jsonquery v1.1.4/go.mod h1:cHs8r6Bymd8j6HI6Ej1IJbjahKvLBcIEh54dfmo+E9A= github.com/antchfx/xmlquery v1.3.6 h1:kaEVzH1mNo/2AJZrhZjAaAUTy2Nn2zxGfYYU8jWfXOo= @@ -233,12 +245,15 @@ github.com/antchfx/xpath v1.1.7/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNY github.com/antchfx/xpath v1.1.10/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk= github.com/antchfx/xpath v1.1.11 h1:WOFtK8TVAjLm3lbgqeP0arlHpvCEeTANeWZ/csPpJkQ= github.com/antchfx/xpath v1.1.11/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= +github.com/antihax/optional 
v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/antonmedv/expr v1.8.9/go.mod h1:5qsM3oLGDND7sDmQGDXHkYfkjYMUX14qsgqmHhwGEk8= github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230 h1:5ultmol0yeX75oh1hY78uAFn3dupBQ/QUNxERCkiaUQ= github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.14.1/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.14.2 h1:hY4rAyg7Eqbb27GB6gkhUKrRAuc8xRjlNtJq+LseKeY= github.com/apache/thrift v0.14.2/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 h1:Bmjk+DjIi3tTAU0wxGaFbfjGUqlxxSXARq9A96Kgoos= @@ -256,11 +271,16 @@ github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6l github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod 
h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/ashanbrown/forbidigo v1.1.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI= +github.com/ashanbrown/makezero v0.0.0-20201205152432-7b7cdbb3025a/go.mod h1:oG9Dnez7/ESBqc4EdrdNlryeo7d0KcW1ftXHm7nU/UU= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.34.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= +github.com/aws/aws-sdk-go v1.38.3/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.38.69 h1:V489lmrdkIQSfF6OAGZZ1Cavcm7eczCm2JcGvX+yHRg= github.com/aws/aws-sdk-go v1.38.69/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= @@ -302,10 +322,14 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2ioR0= github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod 
h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= +github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI= github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/bmatcuk/doublestar/v3 v3.0.0 h1:TQtVPlDnAYwcrVNB2JiGuMc++H5qzWZd9PhkNo5WyHI= @@ -314,7 +338,9 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4Yn github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/bombsimon/wsl/v3 v3.2.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/bsm/sarama-cluster v2.1.13+incompatible/go.mod h1:r7ao+4tTNXvWm+VRpRJchr2kQhqxgmAp2iEX5W96gMM= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= @@ -340,10 +366,12 @@ github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8 github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= 
+github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/charithe/durationcheck v0.0.6/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -395,7 +423,6 @@ github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go. github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= @@ -463,9 +490,12 @@ github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgU github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd 
v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -485,22 +515,27 @@ github.com/couchbase/gomemcached v0.1.3 h1:HIc5qMYNbuhB7zNaiEtj61DCYkquAwrQlf64q github.com/couchbase/gomemcached v0.1.3/go.mod h1:mxliKQxOv84gQ0bJWbI+w9Wxdpt9HjDvgW9MjCym5Vo= github.com/couchbase/goutils v0.1.0 h1:0WLlKJilu7IBm98T8nS9+J36lBFVLRUSIUtyD/uWpAE= github.com/couchbase/goutils v0.1.0/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/crossdock/crossdock-go 
v0.0.0-20160816171116-049aabb0122b/go.mod h1:v9FBN7gdVTpiD/+LZ7Po0UKvROyT87uLVxTHVky/dlQ= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= +github.com/daixiang0/gci v0.2.8/go.mod h1:+4dZ7TISfSmqfAGv59ePaHfNzgGtIkHAhhdKggP1JAc= github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= +github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892/go.mod h1:CTDl0pzVzE5DEzZhPfvhY/9sPFMQIxaJ9VAMs9AagrE= +github.com/denis-tingajkin/go-header v0.4.2/go.mod h1:eLRHAVXzE5atsKAnNRDB90WHCFFnBUn4RN0nRcs1LJA= github.com/denisenkom/go-mssqldb v0.10.0 h1:QykgLZBorFE95+gO3u9esLd0BmbvpWp0/waNNZfHBM8= github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= @@ -508,13 +543,20 @@ github.com/devigned/tab v0.0.1/go.mod h1:oVYrfgGyond090gxCvvbjZji79+peOiSV6vhZhK github.com/devigned/tab v0.1.1 h1:3mD6Kb1mUOYeLpJvTVSDwSg5ZsfSxfvxGRTxRsJsITA= github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= 
github.com/devigned/tab/opencensus v0.1.2/go.mod h1:U6xXMXnNwXJpdaK0mnT3zdng4WTi+vCfqn7YHofEv2A= +github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= +github.com/dgraph-io/badger/v3 v3.2103.1/go.mod h1:dULbq6ehJ5K0cGW/1TQ9iSfUk0gbSiToDWmWmTsJ53E= +github.com/dgraph-io/ristretto v0.0.1/go.mod h1:T40EBc7CJke8TkpiYfGGKAeFjSaxuFXhuXRyumBd6RE= +github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/digitalocean/godo v1.42.1/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= +github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/digitalocean/godo v1.58.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom 
v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= @@ -525,7 +567,7 @@ github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.7+incompatible h1:Z6O9Nhsjv+ayUEeI1IojKbYcsGdgYSNqxe1s2MYzUhQ= github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= @@ -583,8 +625,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v0.0.0-20200808040245-162e5629780b/go.mod h1:NAJj0yf/KaRKURN6nyi7A9IZydMivZEm9oQLWNjfKDc= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/esimonov/ifshort v1.0.1/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod 
h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= @@ -592,8 +633,11 @@ github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqL github.com/facebookgo/stackerr v0.0.0-20150612192056-c2fcf88613f4 h1:fP04zlkPjAGpsduG7xN3rRkxjAqkJaIQnnkNYYw/pAk= github.com/facebookgo/stackerr v0.0.0-20150612192056-c2fcf88613f4/go.mod h1:SBHk9aNQtiw4R4bEuzHjVmZikkUKCnO1v3lPQ21HZGk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg= +github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= +github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= +github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c= @@ -602,22 +646,29 @@ github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8 github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= -github.com/frankban/quicktest v1.10.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= +github.com/frankban/quicktest v1.7.3/go.mod 
h1:V1d2J5pfxYH6EjBAgSK7YNXcXlTWxUHdE1sVDXkjnig= github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= +github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E= github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= +github.com/gdamore/tcell v1.3.0/go.mod h1:Hjvr+Ofd+gLglo7RYKxxnzCBmev3BzsS67MebKS4zMM= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod 
h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= +github.com/go-critic/go-critic v0.5.4/go.mod h1:cjB4YGw+n/+X8gREApej7150Uyy1Tg8If6F2XOAUXNE= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -635,8 +686,9 @@ github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= +github.com/go-ole/go-ole v1.2.5 h1:t4MGB5xEDZvXI+0rMjjsfBsD7yAgp/s9ZDkL1JndXwY= +github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= @@ -644,21 +696,30 @@ github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9sn github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= github.com/go-openapi/analysis v0.19.5/go.mod 
h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ= +github.com/go-openapi/analysis v0.19.16/go.mod h1:GLInF007N83Ad3m8a/CbQ5TPzdnGT7workfHwuVjNVk= +github.com/go-openapi/analysis v0.20.0/go.mod h1:BMchjvaHDykmRMsK40iPtvyOfFdMMxlOmQr9FBZk+Og= +github.com/go-openapi/analysis v0.20.1/go.mod h1:BMchjvaHDykmRMsK40iPtvyOfFdMMxlOmQr9FBZk+Og= github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= github.com/go-openapi/errors v0.19.4/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/errors v0.19.6/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.19.7/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.0/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.1/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference 
v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= @@ -666,17 +727,28 @@ github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI= github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY= +github.com/go-openapi/loads v0.19.6/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= +github.com/go-openapi/loads v0.19.7/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= +github.com/go-openapi/loads v0.20.0/go.mod h1:2LhKquiE513rN5xC6Aan6lYOSddlL8Mp20AW9kpviM4= +github.com/go-openapi/loads v0.20.2/go.mod h1:hTVUotJ+UonAMMZsvakEgmWKgtulweO9vYP2bQYKA/o= github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= github.com/go-openapi/runtime v0.19.4/go.mod 
h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiSjahULvYmlv98= +github.com/go-openapi/runtime v0.19.24/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk= +github.com/go-openapi/runtime v0.19.28/go.mod h1:BvrQtn6iVb2QmiVXRsFAm6ZCAZBpbVKFfN6QWCp582M= github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/spec v0.19.15/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= +github.com/go-openapi/spec v0.20.0/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= +github.com/go-openapi/spec v0.20.1/go.mod h1:93x7oh+d+FQsmsieroS4cmR3u0p/ywH649a3qwC9OsQ= +github.com/go-openapi/spec v0.20.2/go.mod h1:RW6Xcbs6LOyWLU/mXGdzn2Qc+3aj+ASfI7rvSZh1Vls= +github.com/go-openapi/spec v0.20.3/go.mod h1:gG4F8wdEDN+YPBMVnzE85Rbhf+Th2DTvA9nFPQ5AYEg= github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= @@ -684,19 +756,33 @@ github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6 github.com/go-openapi/strfmt v0.19.3/go.mod 
h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= +github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= +github.com/go-openapi/strfmt v0.20.1/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk= +github.com/go-openapi/strfmt v0.20.2/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= +github.com/go-openapi/swag v0.19.12/go.mod h1:eFdyEBkTdoAf/9RXBvj4cr1nH7GD8Kzo5HTt47gr72M= +github.com/go-openapi/swag v0.19.13/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= github.com/go-openapi/validate v0.19.8/go.mod 
h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= +github.com/go-openapi/validate v0.19.10/go.mod h1:RKEZTUWDkxKQxN2jDT7ZnZi2bhZlbNMAuKvKB+IaGx8= +github.com/go-openapi/validate v0.19.12/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0waH08tGe6kAQ4= +github.com/go-openapi/validate v0.19.15/go.mod h1:tbn/fdOwYHgrhPBzidZfJC2MIVvs9GA7monOmWBbeCI= +github.com/go-openapi/validate v0.20.1/go.mod h1:b60iJT+xNNLfaQJUqLI7946tYiFEOuE9E4k54HpKcJ0= +github.com/go-openapi/validate v0.20.2/go.mod h1:e7OJoKNgd0twXZwIn0A43tHbvIcr/rZIVCbJBpTUoY0= github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c h1:fWdhUpCuoeNIPiQ+pkAmmERYEjhVx5/cbVGK7T99OkI= github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c/go.mod h1:35JbSyV/BYqHwwRA6Zr1uVDm1637YlNOU61wI797NPI= +github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= +github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg= github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= @@ -704,8 +790,21 @@ github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= +github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= +github.com/go-toolsmith/astcast 
v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= +github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= +github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= +github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= +github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= +github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= +github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= +github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= +github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= +github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= +github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= @@ -736,20 +835,21 @@ github.com/goburrow/serial v0.1.0 h1:v2T1SQa/dlUqQiYIT8+Cu7YolfqAi3K96UmhwYyuSrA github.com/goburrow/serial v0.1.0/go.mod h1:sAiqG0nRVswsm1C97xsttiYCzSLBmUZ/VSlVLZJ8haA= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gocql/gocql v0.0.0-20200228163523-cd4b606dd2fb/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY= github.com/godbus/dbus 
v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE= github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= -github.com/gogo/googleapis v1.3.1/go.mod h1:d+q1s/xVJxZGKWwC/6UfPIF33J+G1Tq4GYv9Y+Tg/EU= -github.com/gogo/googleapis v1.4.0 h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI= github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= +github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -772,8 +872,9 @@ github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache 
v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -783,7 +884,6 @@ github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v0.0.0-20170307001533-c9c7427a2a70/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -803,16 +903,29 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf 
v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/addlicense v0.0.0-20190510175307-22550fa7c1b0/go.mod h1:QtPG26W17m+OIQgE6gQ24gC1M6pUaMBAbFrTIDtwG/E= +github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= +github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= +github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= +github.com/golangci/golangci-lint v1.38.0/go.mod h1:Knp/sd5ATrVp7EOzWzwIIFH+c8hUfpW+oOQb8NvdZDo= +github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= +github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= +github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= +github.com/golangci/revgrep v0.0.0-20210208091834-cd28932614b5/go.mod h1:LK+zW4MpyytAWQRz0M4xnzEk50lSvqDQKfx304apFkY= +github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= +github.com/google/addlicense v0.0.0-20200906110928-a0294312aa76/go.mod h1:EMjYTRimagHs1FwlIqKyX3wAM0u3rA+McvlIIWmSamA= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod 
h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/flatbuffers v1.11.0 h1:O7CEyB8Cb3/DmtxODGtLHcEvpr81Jm5qLg/hsHnxA2A= +github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v1.12.0 h1:/PtAHvnBY4Kqnx/xCQ3OIV9uYcSFGScBsWI3Oogeh6w= +github.com/google/flatbuffers v1.12.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -850,6 +963,7 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210323184331-8eee2492667d/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= @@ -862,32 +976,43 @@ github.com/google/uuid v1.2.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.4.0/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= +github.com/gookit/color v1.3.6/go.mod h1:R3ogXq2B9rTbXoSHJ1HyUVAZ3poOJHpd9nQmyGZsfvQ= github.com/gopcua/opcua v0.2.0-rc2.0.20210409063412-baabb9b14fd2 h1:OtFKr0Kwe1oLpMR+uNMh/DPgC5fxAq4xRe6HBv8LDqQ= github.com/gopcua/opcua v0.2.0-rc2.0.20210409063412-baabb9b14fd2/go.mod h1:a6QH4F9XeODklCmWuvaOdL8v9H0d73CEKUHWVZLQyE8= -github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gophercloud/gophercloud v0.12.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= +github.com/gophercloud/gophercloud v0.16.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= +github.com/gordonklaus/ineffassign 
v0.0.0-20210225214923-2e10b2664254/go.mod h1:M9mZEtGIsR1oDaZagNPNG9iq9n2HrhZ17dsXk73V3Lw= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gosnmp/gosnmp v1.32.0 h1:gctewmZx5qFI0oHMzRnjETqIZ093d9NgZy9TQr3V0iA= github.com/gosnmp/gosnmp v1.32.0/go.mod h1:EIp+qkEpXoVsyZxXKy0AmXQx0mCHMMcIhXXvNDMpgF0= +github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/gostaticanalysis/analysisutil 
v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw= +github.com/gostaticanalysis/analysisutil v0.4.1/go.mod h1:18U/DLpRgIUd459wGxVHE0fRgmo1UgHDcbw7F5idXu0= +github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI= +github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= +github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= +github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grid-x/modbus v0.0.0-20210224155242-c4a3d042e99b h1:Y4xqzO0CDNoehCr3ncgie3IgFTO9AzV8PMMEWESFM5c= github.com/grid-x/modbus v0.0.0-20210224155242-c4a3d042e99b/go.mod h1:YaK0rKJenZ74vZFcSSLlAQqtG74PMI68eDjpDCDDmTw= @@ -895,22 +1020,27 @@ github.com/grid-x/serial v0.0.0-20191104121038-e24bc9bf6f08 h1:syBxnRYnSPUDdkdo5 github.com/grid-x/serial v0.0.0-20191104121038-e24bc9bf6f08/go.mod h1:kdOd86/VGFWRrtkNwf1MPk0u1gIjc4Y7R2j7nhwc7Rk= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod 
h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw= +github.com/grpc-ecosystem/grpc-gateway v1.14.5/go.mod h1:UJ0EZAp832vCd54Wev9N1BMKEyvcZ5+IM0AwDrnlkEc= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 h1:U0KvGD9CJIl1nbgu9yLsfWxMT6WqL8fG0IBB7RvOZZQ= github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0/go.mod h1:dk23l2BruuUzRP8wbybQbPn3J7sZga2QHICCeaEy5rQ= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/api v1.6.0/go.mod h1:1NSuaUUkFaJzMasbfq/11wKYWSR67Xn6r2DXKhuDNFg= +github.com/hashicorp/consul/api v1.8.1/go.mod h1:sDjTOq0yUyv5G4h+BqSea7Fn6BU+XbolEz1952UB+mk= github.com/hashicorp/consul/api v1.11.0 h1:Hw/G8TtRvOElqxVIhBzXciiSTbapq8hZ2XKZsXk5ZCE= github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.6.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= +github.com/hashicorp/consul/sdk v0.7.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/consul/sdk v0.8.0 h1:OJtKBtEjboEZvG6AOUdh4Z1Zbyu0WcxQ0qatRrZHTVU= 
github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -920,8 +1050,11 @@ github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v0.12.2 h1:F1fdYblUEsxKiailtkhCCG2g4bipEgaHiDc8vffNpD4= -github.com/hashicorp/go-hclog v0.12.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.15.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.16.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= +github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.2.0 h1:l6UW37iCXwZkZoAbEYnptSHVE/cQ5bOTPYG5W3vf9+8= github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= @@ -932,6 +1065,8 @@ github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1: github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-plugin v1.4.0/go.mod 
h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= +github.com/hashicorp/go-plugin v1.4.2/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= @@ -958,10 +1093,11 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p github.com/hashicorp/memberlist v0.2.2 h1:5+RffWKwqJ71YPu9mWsF7ZOscZmwfasdA8kbdC7AO2g= github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/serf v0.9.3/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/hashicorp/serf v0.9.5 h1:EBWvyu9tcRszt3Bxp3KNssBMP1KuHWyO51lz9+786iM= github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= -github.com/hetznercloud/hcloud-go v1.21.1/go.mod h1:xng8lbDUg+xM1dgc0yGHX5EeqbwIq7UYlMWMTx3SQVg= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20190923154419-df201c70410d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hetznercloud/hcloud-go v1.24.0/go.mod h1:3YmyK8yaZZ48syie6xpm3dt26rtB6s65AisBHylXYFA= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -974,10 +1110,10 @@ github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= 
github.com/influxdata/apcupsd v0.0.0-20210427145308-694d5caead0e h1:3J1OB4RDKwXs5l8uEV6BP/tucOJOPDQysiT7/9cuXzA= github.com/influxdata/apcupsd v0.0.0-20210427145308-694d5caead0e/go.mod h1:WYK/Z/aXq9cbMFIL5ihcA4sX/r/3/WCas/Qvs/2fXcA= -github.com/influxdata/flux v0.65.0/go.mod h1:BwN2XG2lMszOoquQaFdPET8FRQfrXiZsWmcMO9rkaVY= +github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= github.com/influxdata/go-syslog/v3 v3.0.0 h1:jichmjSZlYK0VMmlz+k4WeOQd7z745YLsvGMqwtYt4I= github.com/influxdata/go-syslog/v3 v3.0.0/go.mod h1:tulsOp+CecTAYC27u9miMgq21GqXRW6VdKbOG+QSP4Q= -github.com/influxdata/influxdb v1.8.2/go.mod h1:SIzcnsjaHRFpmlxpJ4S3NT64qtEKYweNTUMb/vh0OMQ= +github.com/influxdata/influxdb v1.8.4/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= github.com/influxdata/influxdb-observability/common v0.2.7 h1:C+oDh8Kbw+Ykx9yog/uJXL27rwMN3hgTLQfAFg1eQO0= github.com/influxdata/influxdb-observability/common v0.2.7/go.mod h1:+8VMGrfWZnXjc1c/oP+N4O/sHoneWgN3ojAHwgYgV4A= github.com/influxdata/influxdb-observability/influx2otel v0.2.7 h1:YIXH+qNQgAtTA5U3s/wxDxxh5Vz+ylhZhyuRxtfTBqs= @@ -985,7 +1121,7 @@ github.com/influxdata/influxdb-observability/influx2otel v0.2.7/go.mod h1:ASyDMo github.com/influxdata/influxdb-observability/otel2influx v0.2.7 h1:FACov3tcGCKfEGXsyUbgUOQx3zXffXaCFbN3ntAzh1E= github.com/influxdata/influxdb-observability/otel2influx v0.2.7/go.mod h1:tE3OSy4RyAHIjxYlFZBsWorEM3aqaUeqSx3mbacm8KI= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/influxdata/influxql v1.1.0/go.mod h1:KpVI7okXjK6PRi3Z5B+mtKZli+R1DnZgb3N+tzevNgo= +github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= github.com/influxdata/promql/v2 v2.12.0/go.mod 
h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8= github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= @@ -1039,27 +1175,46 @@ github.com/jackc/pgx/v4 v4.6.0/go.mod h1:vPh43ZzxijXUVJ+t/EmXBtFmbFVO72cuneCT9oA github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jaegertracing/jaeger v1.15.1 h1:7QzNAXq+4ko9GtCjozDNAp2uonoABu+B2Rk94hjQcp4= -github.com/jaegertracing/jaeger v1.15.1/go.mod h1:LUWPSnzNPGRubM8pk0inANGitpiMOOxihXx0+53llXI= +github.com/jaegertracing/jaeger v1.22.0/go.mod h1:WnwW68MjJEViSLRQhe0nkIsBDaF3CzfFd8wJcpJv24k= +github.com/jaegertracing/jaeger v1.23.0/go.mod h1:gB6Qc+Kjd/IX1G82oGTArbHI3ZRO//iUkaMW+gzL9uw= +github.com/jaegertracing/jaeger v1.26.0 h1:4LbUdb9l/Mx83zYvjLbkrayheX+Aga26NEI+feo3xzA= +github.com/jaegertracing/jaeger v1.26.0/go.mod h1:SwHsl1PLZVAdkQTPrziQ+4xV9FxzJXRvTDW1YrUIWEA= github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a h1:JxcWget6X/VfBMKxPIc28Jel37LGREut2fpV+ObkwJ0= github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a/go.mod h1:1qNVsDcmNQDsAXYfUuF/Z0rtK5eT8x9D6Pi7S3PjXAg= +github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= +github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= +github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= +github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= +github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= github.com/jcmturner/gofork v1.0.0/go.mod 
h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= +github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= +github.com/jcmturner/gokrb5/v8 v8.4.2 h1:6ZIM6b/JJN0X8UM43ZOM6Z4SJzla+a/u7scXFJzodkA= +github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= +github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= +github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jgautheron/goconst v1.4.0/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= +github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= github.com/jhump/protoreflect v1.8.3-0.20210616212123-6cc1efa697ca h1:a0GZUdb+qnutF8shJxr2qs2qT3fnF+ptxTxPB8+oIvk= github.com/jhump/protoreflect v1.8.3-0.20210616212123-6cc1efa697ca/go.mod h1:7GcYQDdMU/O/BBrl/cX6PNHpXh6cenjd8pneu5yW7Tg= +github.com/jingyugao/rowserrcheck v0.0.0-20210130005344-c6a0c12dd98d/go.mod h1:/EZlaYCnEX24i7qdVhT9du5JrtFWYRQr67bVgR7JJC8= +github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jmespath/go-jmespath v0.4.0 
h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= @@ -1085,6 +1240,7 @@ github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVE github.com/juju/testing v0.0.0-20191001232224-ce9dec17d28b/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/julz/importas v0.0.0-20210226073942-60b4fa260dd0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= github.com/kardianos/service v1.0.0 
h1:HgQS3mFfOlyntWX8Oke98JcJLqt1DBcHR4kxShpYef0= @@ -1098,65 +1254,86 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:C github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/errcheck v1.6.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.13.1 h1:wXr2uRxZTJXHLly6qhJabee5JqIhTRoLBhDOA74hDEQ= -github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.4 h1:0zhec2I8zGnjWcKyLl6i3gPqKANCCn5e9xmviEEeX6s= +github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod 
h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kulti/thelper v0.4.0/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U= +github.com/kunwardeep/paralleltest v1.0.2/go.mod h1:ZPqNm1fVHPllh5LPVujzbVz1JN2GhLxSfY+oqUsvG30= github.com/kylelemons/godebug v1.1.0 
h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg= github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353 h1:X/79QL0b4YJVO5+OsPH9rF2u428CIrGL/jLmPsoOQQ4= github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353/go.mod h1:N0SVk0uhy+E1PZ3C9ctsPRlvOPAFPkCNlcPBDkt0N3U= +github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 h1:bCiVCRCs1Heq84lurVinUPy19keqGEe4jh5vtK37jcg= github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg= +github.com/leoluk/perflib_exporter v0.1.0/go.mod h1:rpV0lYj7lemdTm31t7zpCqYqPnw7xs86f+BaaNBVYFM= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU= -github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.9.0 h1:L8nSXQQzAYByakOFMTwpjRoHsMJklur4Gi59b6VivR8= +github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= +github.com/lucasb-eyer/go-colorful v1.0.2/go.mod h1:0MS4r+7BZKSJ5mw4/S5MPN+qHFF1fYclkSPilDOKW0s= +github.com/lucasb-eyer/go-colorful v1.0.3/go.mod 
h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magefile/mage v1.10.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mailru/easyjson v0.7.1 h1:mdxE1MF9o53iCb2Ghj1VfWvh7ZOwHpnVG/xwXrV90U8= github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU= github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/marstr/guid v1.1.0/go.mod 
h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= +github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI= github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= @@ -1172,26 +1349,34 @@ github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHX github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-sqlite3 v1.9.0/go.mod 
h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg= github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= +github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= github.com/mdlayher/genetlink v1.0.0 h1:OoHN1OdyEIkScEmRgxLEe2M9U8ClMytqA5niynLtfj0= github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc= github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= github.com/mdlayher/netlink v1.1.0 h1:mpdLgm+brq10nI9zM1BpX1kpDbh3NLl3RSnVq6ZSkfg= github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= +github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= +github.com/mgechev/revive v1.0.3/go.mod h1:POGGZagSo/0frdr7VeAifzS5Uka0d0GPiM35MsTO8nE= github.com/microsoft/ApplicationInsights-Go v0.4.4 h1:G4+H9WNs6ygSCe6sUyxRc2U81TI5Es90b2t/MwX5KqY= github.com/microsoft/ApplicationInsights-Go 
v0.4.4/go.mod h1:fKRUseBqkw6bDiXTs3ESTiU/4YTIHsQS4W3fP2ieF4U= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= @@ -1205,6 +1390,8 @@ github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXx github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= @@ -1212,9 +1399,14 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4 github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.2.2 
h1:dxe5oCinTXiTIcfgmZecdCzPmAJKd46KsCWc35r0TV4= github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= +github.com/mjibson/esc v0.2.0/go.mod h1:9Hw9gxxfHulMF5OJKCyhYD7PzlSdhzXyaGEBRPH1OPs= github.com/moby/ipvs v1.0.1 h1:aoZ7fhLTXgDbzVrAnvV+XbKOU8kOET7B3+xULDF/1o0= github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= @@ -1240,9 +1432,12 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mozilla/tls-observatory v0.0.0-20190404164649-a3c1b6cfecfd/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= 
+github.com/mozilla/tls-observatory v0.0.0-20201209171846-0547674fceff/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= github.com/multiplay/go-ts3 v1.0.0 h1:loxtEFqvYtpoGh1jOqEt6aDzctYuQsi3vb3dMpvWiWw= @@ -1252,6 +1447,7 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5wEBSYHI2c= github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= @@ -1274,11 +1470,15 @@ github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8= github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= +github.com/nbutton23/zxcvbn-go v0.0.0-20201221231540-e56b841a3c88/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/newrelic/newrelic-telemetry-sdk-go v0.5.1 
h1:9YEHXplqlVkOltThchh+RxeODvTb1TBvQ1181aXg3pY= github.com/newrelic/newrelic-telemetry-sdk-go v0.5.1/go.mod h1:2kY6OeOxrJ+RIQlVjWDc/pZlT3MIf30prs6drzMfJ6E= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nishanths/exhaustive v0.1.0/go.mod h1:S1j9110vxV1ECdCudXRkeMnFQ/DQk9ajLT0Uf2MYZQQ= github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso= +github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB9sbB1usJ+xjQE= github.com/nsqio/go-nsq v1.0.8 h1:3L2F8tNLlwXXlp2slDUrUWSBn2O3nMh8R1/KEDFTHPk= github.com/nsqio/go-nsq v1.0.8/go.mod h1:vKq36oyeVXgsS5Q8YEO7WghqidAVXQlcFxzQbQTuDEY= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= @@ -1288,6 +1488,9 @@ github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQ github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= +github.com/olivere/elastic v6.2.35+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8= +github.com/olivere/elastic v6.2.37+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8= github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -1297,18 +1500,22 @@ github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.10.1/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.14.2 h1:8mVmC9kjFFmA8H4pKMUhcblgifdkOIXPvbhN1T36q1M= +github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= +github.com/onsi/gomega v1.10.4 h1:NiTx7EEvBzu9sFOD1zORteLSt3o8gnlvZZwSE9TnY9U= +github.com/onsi/gomega v1.10.4/go.mod h1:g/HbgYopi++010VEqkFgJHKC09uJiW9UkXvMUuKHUCQ= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029 
h1:lXQqyLroROhwR2Yq/kXbLzVecgmVeZh2TFLg6OxCd+w= github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc= @@ -1350,8 +1557,10 @@ github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqi github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= +github.com/opentracing-contrib/go-grpc v0.0.0-20191001143057-db30781987df/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 h1:lM6RxxfUMrYL/f8bWEUqdXrANWtrL7Nndbm9iFN0DlU= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -1366,27 +1575,35 @@ github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnh github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.5 h1:UwtQQx2pyPIgWYHRg+epgdx1/HnBQTgN3/oIYEJTQzU= github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE= +github.com/ory/go-acc v0.2.6/go.mod h1:4Kb/UnPcT8qRAk3IAxta+hvVapdxTLWtrr7bFLlEgpw= +github.com/ory/viper v1.7.5/go.mod h1:ypOuyJmEUb3oENywQZRgeAMwqgOyDqwboO1tj3DjTaM= github.com/pact-foundation/pact-go v1.0.4/go.mod 
h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= -github.com/pavius/impi v0.0.0-20180302134524-c1cbdcb8df2b/go.mod h1:x/hU0bfdWIhuOT1SKwiJg++yvkk6EuOtJk8WtDZqgr8= +github.com/pavius/impi v0.0.3/go.mod h1:x/hU0bfdWIhuOT1SKwiJg++yvkk6EuOtJk8WtDZqgr8= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= +github.com/pelletier/go-toml v1.6.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs= github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= +github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= +github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ= 
github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= -github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A= +github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pion/dtls/v2 v2.0.9 h1:7Ow+V++YSZQMYzggI0P9vLJz/hUFcffsfGMfT/Qy+u8= github.com/pion/dtls/v2 v2.0.9/go.mod h1:O0Wr7si/Zj5/EBFlDzDd6UtVxx25CE1r7XM7BQKYQho= github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= @@ -1404,23 +1621,31 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
+github.com/polyfloyd/go-errorlint v0.0.0-20201127212506-19bd8db6546f/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= github.com/prometheus/alertmanager v0.21.0/go.mod h1:h7tJ81NA0VLWvWEayi1QltevFkLF3KxmC/malTcT8Go= github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= +github.com/prometheus/client_golang 
v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -1433,6 +1658,7 @@ github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -1440,11 +1666,17 @@ github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+ github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.13.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= +github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.23.0/go.mod 
h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q= +github.com/prometheus/common v0.25.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.30.0 h1:JEkYlQnpzrzQFxi6gnukFPdQ+ac82oRhzMcIduJu/Ug= +github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/exporter-toolkit v0.5.1/go.mod h1:OCkM4805mmisBhLmVFw858QYi3v0wKdY6/UxrT0pZVg= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -1458,17 +1690,27 @@ github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/prometheus v1.8.2-0.20200911110723-e83ef207b6c2 h1:IB/5RJRcJiR/YzKs4Aou86s/RaMepZOZVCArYNHJHWc= -github.com/prometheus/prometheus v1.8.2-0.20200911110723-e83ef207b6c2/go.mod h1:Td6hjwdXDmVt5CI9T03Sw+yBNxLBq/Yx3ZtmtP8zlCA= +github.com/prometheus/prometheus v1.8.2-0.20210430082741-2a4b8e12bbf2 h1:AHi2TGs09Mv4v688/bjcY2PfAcu9+p4aPvsgVQ4nYDk= +github.com/prometheus/prometheus 
v1.8.2-0.20210430082741-2a4b8e12bbf2/go.mod h1:5aBj+GpLB+V5MCnrKm5+JAqEJwzDiLugOmDhgt7sDec= +github.com/prometheus/statsd_exporter v0.20.0/go.mod h1:YL3FWCG8JBBtaUSxAg4Gz2ZYu22bS84XM89ZQXXTWmQ= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= +github.com/quasilyte/go-ruleguard v0.3.0/go.mod h1:p2miAhLp6fERzFNbcuQ4bevXs8rgK//uCHsUDkumITg= +github.com/quasilyte/go-ruleguard/dsl v0.0.0-20210106184943-e47d54850b18/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/go-ruleguard/dsl v0.0.0-20210115110123-c73ee1cbff1f/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc= +github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= -github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= 
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= github.com/riemann/riemann-go-client v0.5.0 h1:yPP7tz1vSYJkSZvZFCsMiDsHHXX57x8/fEX3qyEXuAA= github.com/riemann/riemann-go-client v0.5.0/go.mod h1:FMiaOL8dgBnRfgwENzV0xlYJ2eCbV1o7yqVwOBLbShQ= +github.com/rivo/tview v0.0.0-20200219210816-cd38d7432498/go.mod h1:6lkG1x+13OShEf0EaOCaTQYyB7d5nSbb181KtjlS+84= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/robertkrimen/otto v0.0.0-20191219234010-c382bd3c16ff h1:+6NUiITWwE5q1KO6SAfUX918c+Tab0+tGAM/mtdlUyA= github.com/robertkrimen/otto v0.0.0-20191219234010-c382bd3c16ff/go.mod h1:xvqspoSXJTIpemEonrMDFq6XzwHYYgToXWj5eRX1OtY= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= @@ -1478,50 +1720,64 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.6.2 h1:aIihoIOHCiLZHxyoNQ+ABL4NKhFTgKLBdMLyEAh98m0= +github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= 
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryancurrah/gomodguard v1.2.0/go.mod h1:rNqbC4TOIdUDcVMSIpNNAzTbzXAZa6W5lnUepvuMMgQ= +github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 h1:gvolwzuDhul9qK6/oHqxCHD5TEYfsWNBGidOeG6kvpk= github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= -github.com/samuel/go-zookeeper v0.0.0-20190810000440-0ceca61e4d75/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e h1:CGjiMQ0wMH4wtNWrlj6kiTbkPt2F3rbYnhGX6TWLfco= github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu9FXAw2W4= +github.com/sanposhiho/wastedassign v0.1.3/go.mod h1:LGpq5Hsv74QaqM47WtIsRSF/ik9kqk07kchgv66tLVE= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44/go.mod h1:CJJ5VAbozOl0yEw7nHB9+7BXTJbIn6h7W+f6Gau5IP8= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= 
+github.com/securego/gosec v0.0.0-20200203094520-d13bb6d2420c/go.mod h1:gp0gaHj0WlmPh9BdsTmo1aq6C27yIPWdxCKGFGdVKBE= +github.com/securego/gosec/v2 v2.6.1/go.mod h1:I76p3NTHBXsGhybUW+cEQ692q2Vp+A0Z6ZLzDIZy+Ao= github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/sensu/sensu-go/api/core/v2 v2.9.0 h1:NanHMIWbrHP/L4Ge0V1x2+0G9bxFHpvhwjdr3wSF9Vg= github.com/sensu/sensu-go/api/core/v2 v2.9.0/go.mod h1:QcgxKxydmScE66hLBTzbFhhiPSR/JHqUjNi/+Lelh6E= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shirou/gopsutil v2.18.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= +github.com/shirou/gopsutil v3.21.5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil v3.21.8+incompatible h1:sh0foI8tMRlCidUJR+KzqWYWxrkuuPIGiO6Vp+KXdCU= github.com/shirou/gopsutil v3.21.8+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= +github.com/shirou/gopsutil/v3 v3.21.1/go.mod h1:igHnfak0qnw1biGeI2qKQvu0ZkwvEkUcCLlYhZzdr/4= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 h1:Pm6R878vxWWWR+Sa3ppsLce/Zq+JNTs6aVvRu13jv9A= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/showwin/speedtest-go v1.1.4 h1:pcY1W5LYZu44lH6Fuu80nu/Pj67n//VArlZudbAgR6E= github.com/showwin/speedtest-go v1.1.4/go.mod h1:dJugxvC/AQDt4HQQKZ9lKNa2+b1c8nzj9IL0a/F8l1U= +github.com/shurcooL/go 
v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= -github.com/shurcooL/vfsgen v0.0.0-20200627165143-92b8a710ab6c/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= +github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/signalfx/com_signalfx_metrics_protobuf v0.0.2 h1:X886QgwZH5qr9HIQkk3mWcNEhUxx6D8rUZumzLV4Wiw= github.com/signalfx/com_signalfx_metrics_protobuf v0.0.2/go.mod h1:tCQQqyJAVF1+mxNdqOi18sS/zaSrE6EMyWwRA2QTl70= github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083 h1:WsShHmu12ZztYPfh9b+I+VjYD1o8iOHhB67WZCMEEE8= github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083/go.mod h1:adPDS6s7WaajdFBV9mQ7i0dKfQ8xiDnF9ZNETVPpp7c= -github.com/signalfx/golib/v3 v3.3.34 h1:s78S24+exS0jH21oeSB1qPeiekIKkeXGv0hg7f67HvU= -github.com/signalfx/golib/v3 v3.3.34/go.mod h1:PB7OovVijH7OGhzMewarEcIZG3eG6akWMDucIb5Jnb4= +github.com/signalfx/golib/v3 v3.3.38 h1:4EukKPAxVsqlkfaetUv+BpbuJ2l0YeQbwiQg3ADtlzU= +github.com/signalfx/golib/v3 v3.3.38/go.mod h1:J7vY30VdC39CSin5ZRIrThnkyNW8x1fnJGD+NBW4LuY= github.com/signalfx/gomemcache v0.0.0-20180823214636-4f7ef64c72a9/go.mod h1:Ytb8KfCSyuwy/VILnROdgCvbQLA5ch0nkbG7lKT0BXw= -github.com/signalfx/sapm-proto v0.4.0 h1:5lQX++6FeIjUZEIcnSgBqhOpmSjMkRBW3y/4ZiKMo5E= -github.com/signalfx/sapm-proto v0.4.0/go.mod h1:x3gtwJ1GRejtkghB4nYpwixh2zqJrLbPU959ZNhM0Fk= +github.com/signalfx/sapm-proto v0.7.2 h1:iM/y3gezQm1/j7JBS0gXhEJ8ROeneb6DY7n0OcnvLks= 
+github.com/signalfx/sapm-proto v0.7.2/go.mod h1:HLufOh6Gd2altGxbeve+s6hh0EWCWoOM7MmuYuvs5PI= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -1533,6 +1789,7 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.0/go.mod h1:4GuYW9TZmE769R5STWrRakJc4UqQ3+QQ95fyz7ENv1A= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sleepinggenius2/gosmi v0.4.3 h1:99Zwzy1Cvgsh396sw07oR2G4ab88ILGZFMxSlGWnR6o= @@ -1540,21 +1797,31 @@ github.com/sleepinggenius2/gosmi v0.4.3/go.mod h1:l8OniPmd3bJzw0MXP2/qh7AhP/e+bT github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smartystreets/goconvey v1.6.4-0.20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/snowflakedb/gosnowflake v1.5.0 h1:Md7P8zbPegXy0+/SZ2nG8whXYkAT44nQ/yEb35LlIKo= 
github.com/snowflakedb/gosnowflake v1.5.0/go.mod h1:1kyg2XEduwti88V11PKRHImhXLK5WpGiayY6lFNYb98= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0/go.mod 
h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -1562,7 +1829,13 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= +github.com/ssgreg/nlreturn/v2 v2.1.0/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ -1574,7 +1847,9 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod 
h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -1584,15 +1859,18 @@ github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 h1:Oj2e7Sae4XrOsk3ij21QjjEgAcVSeo9nkp0dI//cD2o= github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod h1:qUzPVlSj2UgxJkVbH0ZwuuiR46U8RBMDT5KLY78Ifpw= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod 
h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= github.com/testcontainers/testcontainers-go v0.11.1 h1:FiYsB83LSGbiawoV8TpAZGfcCUbtaeeg1SXqEKUxh08= github.com/testcontainers/testcontainers-go v0.11.1/go.mod h1:/V0UVq+1e7NWYoqTPog179clf0Qp9TOyp4EcXaEFQz8= +github.com/tetafro/godot v1.4.4/go.mod h1:FVDd4JuKliW3UgjswZfJfHq4vAx0bD/Jd5brJjGeaz4= github.com/tidwall/gjson v1.9.0 h1:+Od7AE26jAaMgVC31cQV/Ope5iKXulNMflrlB7k+F9E= github.com/tidwall/gjson v1.9.0/go.mod h1:5/xDoumyyDNerp2U36lyolv46b3uF/9Bu6OfyQ9GImk= github.com/tidwall/match v1.0.3 h1:FQUVvBImDutD8wJLN6c5eMzWtjgONK9MwIBCOrUJKeE= @@ -1600,27 +1878,45 @@ github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JT github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.1.0 h1:K3hMW5epkdAVwibsQEfR/7Zj0Qgt4DxtNumTq/VloO8= github.com/tidwall/pretty v1.1.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.6 h1:i+SbKraHhnrf9M5MYmvQhFnbLhAXSDWF8WWsuyRdocw= github.com/tinylib/msgp v1.1.6/go.mod h1:75BAfg2hauQhs3qedfdDZmWAPcFMAvJE5b9rGOMufyw= -github.com/tklauser/go-sysconf v0.3.5 h1:uu3Xl4nkLzQfXNsWn15rPc/HQCJKObbt1dKJeWp3vU4= github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= -github.com/tklauser/numcpus v0.2.2 h1:oyhllyrScuYI6g+h/zUvNXNp1wy7x8qQy3t/piefldA= +github.com/tklauser/go-sysconf v0.3.9 h1:JeUVdAOWhhxVcU6Eqr/ATFHgXk/mmiItdKeJPev3vTo= +github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= +github.com/tklauser/numcpus v0.3.0 h1:ILuRUQBtssgnxw0XXIjKUC56fgnOrFoQQ/4+DeU2biQ= +github.com/tklauser/numcpus v0.3.0/go.mod 
h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tomarrell/wrapcheck v0.0.0-20201130113247-1683564d9756/go.mod h1:yiFB6fFoV7saXirUGfuK+cPtUh4NX/Hf5y2WC2lehu0= +github.com/tommy-muehle/go-mnd/v2 v2.3.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/uber/jaeger-client-go v2.29.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= +github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod 
h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/uudashr/gocognit v1.0.1/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.16.0/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA= +github.com/valyala/quicktemplate v1.6.3/go.mod h1:fwPzK2fHuYEODzJ9pkw0ipCPNHZ2tD5KW4lOuSdPKzY= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330 h1:iBlTJosRsR70amr0zsmSPvaKNH8K/p3YlX/5SdPmSl8= github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330/go.mod h1:7+aWBsUJCo9OQRCgTypRmIQW9KKKcPMjtrdnYIBsS70= github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec/go.mod h1:owBmyHYMLkxyrugmfwE/DLJyW8Ro9mkphwuVErQ0iUw= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= +github.com/vektra/mockery v0.0.0-20181123154057-e78b021dcbb5/go.mod h1:ppEjwdhyy7Y31EnHRDm1JkChoC7LXIJ7Ex0VYLWtZtQ= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 h1:cPXZWzzG0NllBLdjWoD1nDfaqu98YMv+OneaKc8sPOA= @@ -1634,6 +1930,7 @@ github.com/vjeantet/grok v1.0.1/go.mod h1:ax1aAchzC6/QMXMcyzHQGZWaW1l195+uMYIkCW github.com/vmware/govmomi v0.26.0 h1:JMZR5c7MHH3nCEAVYS3WyRIA35W3+b3tLwAqxVzq1Rw= github.com/vmware/govmomi v0.26.0/go.mod h1:daTuJEcQosNMXYJOeku0qdBJP9SOLLWB3Mqz8THtv6o= github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9oS4Wk2s2u4tS29nEaDLdzvuHdB19CvSGJjPgkZJNk= +github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad/go.mod 
h1:Hy8o65+MXnS6EwGElrSRjUzQDLXreJlzYLlWiHtt8hM= github.com/wavefronthq/wavefront-sdk-go v0.9.7 h1:SrtABcXXeKCW5SerQYsnCzHo15GeggjZmL+DjtTy6CI= github.com/wavefronthq/wavefront-sdk-go v0.9.7/go.mod h1:JTGsu+KKgxx+GitC65VVdftN2iep1nVpQi/8EGR6v4Y= github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= @@ -1649,11 +1946,13 @@ github.com/xdg-go/scram v1.0.2 h1:akYIkZ28e6A96dkWNJQu3nmCzH3YfwMPQExUYDaRv7w= github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= github.com/xdg-go/stringprep v1.0.2 h1:6iq84/ryjjeRmMJwxutI51F2GIPlP5BfTvXHeYjyhBc= github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= -github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/scram v1.0.3 h1:nTadYh2Fs4BK2xdldEa2g5bbaZp0/+1nJMMPtPxS/to= +github.com/xdg/scram v1.0.3/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= -github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/xdg/stringprep v1.0.3 h1:cmL5Enob4W83ti/ZHuZLuKD/xqJfus4fVPwE+/BDm+4= +github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= @@ -1677,18 +1976,25 @@ github.com/yvasiyarov/newrelic_platform_go 
v0.0.0-20140908184405-b21fdbd4370f/go github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489 h1:1JFLBqwIgdyHN1ZtgjTBwO+blA6gVOmZurpiMEsETKo= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.etcd.io/etcd/api/v3 v3.5.0 h1:GsV3S+OfZEOCNXdtNkBSR7kgLobAa/SO6tCxRa0GAYw= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= go.mongodb.org/mongo-driver v1.3.2/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= +go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= +go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= +go.mongodb.org/mongo-driver v1.5.1/go.mod 
h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= +go.mongodb.org/mongo-driver v1.5.2/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= go.mongodb.org/mongo-driver v1.5.3 h1:wWbFB6zaGHpzguF3f7tW94sVE8sFl3lHx8OZx/4OuFI= go.mongodb.org/mongo-driver v1.5.3/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= @@ -1702,6 +2008,8 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/collector v0.28.0 h1:XmRwoSj3HZtC7O/12fBoQ9DInvwBwFHgHLZrwNxNjQY= +go.opentelemetry.io/collector v0.28.0/go.mod h1:AP/BTXwo1eedoJO7V+HQ68CSvJU1lcdqOzJCgt1VsNs= go.opentelemetry.io/collector/model v0.35.0 h1:NpKjghiqlei4ecwjOYOMhD6tj4gY8yiWHPJmbFs/ArI= go.opentelemetry.io/collector/model v0.35.0/go.mod h1:+7YCSjJG+MqiIFjauzt7oM2qkqBsaJWh5hcsO4fwsAc= go.opentelemetry.io/otel v1.0.0-RC3 h1:kvwiyEkiUT/JaadXzVLI/R1wDO934A7r3Bs2wEe6wqA= @@ -1731,25 +2039,32 @@ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q= go.uber.org/goleak v1.1.10/go.mod 
h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -1762,19 +2077,22 @@ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e h1:gsTQYXdTw2Gq7RBsWvlQ91b+aEQ6bXFUngBGuR8sPpI= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= @@ -1791,6 +2109,7 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod 
h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -1818,7 +2137,7 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1846,13 +2165,13 @@ golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1860,12 +2179,14 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -1876,21 +2197,28 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201201195509-5d6afe98e0b7/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210324051636-2c4c8ecb7826/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net 
v0.0.0-20210331212208-0fccb6fa2b5c/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210427231257-85d9c07bbe3a/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210610132358-84b48f89b13b h1:k+E048sYJHyVnsr1GDrRZWQ32D2C7lWs9JRc0bel53A= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1902,6 +2230,8 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210323180902-22b0adad7558/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a h1:4Kd8OPUx1xgUwrHDaviWZO8MsgoZTZYC3g+8m16RBww= @@ -1919,7 +2249,6 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1928,14 +2257,15 @@ golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1950,6 +2280,8 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626150813-e07cf5db2756/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1970,7 +2302,6 @@ golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1995,14 +2326,13 @@ golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200821140526-fda516888d29/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200821140526-fda516888d29/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2010,6 +2340,7 @@ golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201024232916-9f70ab9862d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2019,20 +2350,25 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210314195730-07df6a141424/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2045,11 +2381,13 @@ golang.org/x/sys v0.0.0-20210611083646-a4fc73990273/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069 h1:siQdpVirKtzPhKl3lZWozZraCFObP8S1v6PRp0bLrtU= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= +golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71 h1:ikCpsnYR+Ew0vu99XlDp55lGgDJdIMx3f4a18jfse/s= +golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2065,21 +2403,27 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181112210238-4b1f3b6b1646/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools 
v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -2097,27 +2441,29 @@ golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools 
v0.0.0-20190906203814-12febf440ab1/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190916130336-e45ffcd953cc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191203134012-c197fd4bf371/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools 
v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200203023011-6f24f261dadb/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -2125,7 +2471,11 @@ golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools 
v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -2133,17 +2483,34 @@ golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200625211823-6506e20df31f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200717024301-6ddee64345a6/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200822203824-307de81be3f4/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools 
v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201011145850-ed2f50202694/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201028025901-8cd080b735b3/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201114224030-61ea331ec02b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201118003311-bd56c0adb394/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201230224404-63754364767c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools 
v0.0.0-20210102185154-773b96fafca2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210104081019-d8d6ddbec6ee/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= @@ -2194,7 +2561,9 @@ google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.42.0/go.mod h1:+Oj4s6ch2SEGtPjGqfUfZonBH0GjQH89gTeKKAEGZKI= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= @@ -2211,6 +2580,7 @@ google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= 
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -2222,6 +2592,7 @@ google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= @@ -2237,6 +2608,7 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto 
v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -2245,7 +2617,6 @@ google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEY google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200815001618-f69a88009b70/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -2257,6 +2628,7 @@ google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210312152112-fc591d9ea70f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= @@ -2273,6 +2645,7 @@ google.golang.org/genproto 
v0.0.0-20210824181836-a4879c3d0e89/go.mod h1:eFjDcFEc google.golang.org/genproto v0.0.0-20210827211047-25e5f791fe06 h1:Ogdiaj9EMVKYHnDsESxwlTr/k5eqCdwoQVJEcdg0NbE= google.golang.org/genproto v0.0.0-20210827211047-25e5f791fe06/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -2332,7 +2705,6 @@ gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= @@ -2347,22 +2719,23 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod 
h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= gopkg.in/gorethink/gorethink.v3 v3.0.5 h1:e2Uc/Xe+hpcVQFsj6MuHlYog3r0JYpnTzwDj/y2O4MU= gopkg.in/gorethink/gorethink.v3 v3.0.5/go.mod h1:+3yIIHJUGMBK+wyPH+iN5TP+88ikFDfZdqTlK3Y9q8I= gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.42.0 h1:7N3gPTt50s8GuLortA00n8AqRTk75qOP98+mTPpgzRk= gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.52.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= +gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= -gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM= gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= -gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI= gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= -gopkg.in/jcmturner/gokrb5.v7 v7.5.0 h1:a9tsXlIDD9SKxotJMK3niV7rPZAJeX2aD/0yg3qlIrg= -gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= -gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= +gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod 
h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= gopkg.in/ldap.v3 v3.1.0 h1:DIDWEjI7vQWREh0S8X5/NFPCZ3MCVd55LmXKPW4XLGE= gopkg.in/ldap.v3 v3.1.0/go.mod h1:dQjCc0R0kfyFjIlWNMH1DORwUASZyDxo2Ry1B51dXaQ= @@ -2388,12 +2761,12 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -2410,25 +2783,31 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.18.8/go.mod h1:d/CXqwWv+Z2XEG1LgceeDmHQwpUJhROPx16SlxJgERY= 
+honnef.co/go/tools v0.1.1/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +honnef.co/go/tools v0.1.2/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +honnef.co/go/tools v0.2.0/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= +honnef.co/go/tools v0.2.1/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= -k8s.io/api v0.20.6 h1:bgdZrW++LqgrLikWYNruIKAtltXbSCX2l5mJu11hrVE= k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= -k8s.io/apimachinery v0.18.8/go.mod h1:6sQd+iHEqmOtALqOFjSWp2KZ9F0wlU/nWm0ZgsYWMig= +k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU= +k8s.io/api v0.22.2 h1:M8ZzAD0V6725Fjg53fKeTJxGsJvRbk4TEm/fexHMtfw= +k8s.io/api v0.22.2/go.mod h1:y3ydYpLJAaDI+BbSe2xmGcqxiWHmWjkEeIbiwHvnPR8= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= +k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= k8s.io/apimachinery v0.22.2 h1:ejz6y/zNma8clPVfNDLnPbleBo6MpoFy/HBiBqCouVk= k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= -k8s.io/client-go v0.18.8/go.mod h1:HqFqMllQ5NnQJNwjro9k5zMyfhZlOwpuTLVrxjkYSxU= k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= -k8s.io/client-go v0.20.6 h1:nJZOfolnsVtDtbGJNCxzOtKUAu7zvXjB8+pMo9UNxZo= k8s.io/client-go v0.20.6/go.mod 
h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= +k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA= +k8s.io/client-go v0.22.2 h1:DaSQgs02aCC1QcwUdkKZWOeaVsQjYvWv8ZazcZ6JcHc= +k8s.io/client-go v0.22.2/go.mod h1:sAlhrkVDf50ZHx6z4K0S40wISNTarf1r800F+RlCF6U= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= @@ -2436,24 +2815,21 @@ k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM= k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod 
h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= -k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a h1:8dYfu/Fc9Gz2rNJKB9IQRGgQOh2clmRzNIPPY1xLY5g= +k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= modernc.org/cc/v3 v3.32.4/go.mod h1:0R6jl1aZlIl2avnYfbfHBS1QB6/f+16mihBObaBC878= modernc.org/cc/v3 v3.33.5 h1:gfsIOmcv80EelyQyOHn/Xhlzex8xunhQxWiJRMYmPrI= modernc.org/cc/v3 v3.33.5/go.mod h1:0R6jl1aZlIl2avnYfbfHBS1QB6/f+16mihBObaBC878= @@ -2483,16 +2859,19 @@ modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/z v1.0.1-0.20210308123920-1f282aa71362/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA= modernc.org/z v1.0.1 h1:WyIDpEpAIx4Hel6q/Pcgj/VhaQV5XPJ2I6ryIYbjnpc= modernc.org/z v1.0.1/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA= +mvdan.cc/gofumpt v0.1.0/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= +mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= +mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= +mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7/go.mod h1:hBpJkZE8H/sb+VRFvw2+rBpHNsTBcvSpk61hr8mzXZE= rsc.io/binaryregexp v0.2.0/go.mod 
h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno= sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= From 7c168220308a2b63ec9ea44ee4c7d4b64538b640 Mon Sep 17 00:00:00 2001 From: Bill Liu Date: Tue, 12 Oct 2021 03:35:21 +1300 Subject: [PATCH 123/176] docs: add external input plugin entry for opcda --- EXTERNAL_PLUGINS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/EXTERNAL_PLUGINS.md b/EXTERNAL_PLUGINS.md index fc71044d6172d..7e074d10f2e8d 100644 --- a/EXTERNAL_PLUGINS.md +++ b/EXTERNAL_PLUGINS.md @@ -8,6 +8,7 @@ Pull requests welcome. ## Inputs - [awsalarms](https://github.com/vipinvkmenon/awsalarms) - Simple plugin to gather/monitor alarms generated in AWS. 
- [octoprint](https://github.com/BattleBas/octoprint-telegraf-plugin) - Gather 3d print information from the octoprint API. +- [opcda](https://github.com/lpc921/telegraf-execd-opcda) - Gather data from [OPC Fundation's Data Access (DA)](https://opcfoundation.org/about/opc-technologies/opc-classic/) protocol for industrial automation. - [open-hardware-monitor](https://github.com/marianob85/open_hardware_monitor-telegraf-plugin) - Gather sensors data provided by [Open Hardware Monitor](http://openhardwaremonitor.org) - [plex](https://github.com/russorat/telegraf-webhooks-plex) - Listens for events from Plex Media Server [Webhooks](https://support.plex.tv/articles/115002267687-webhooks/). - [rand](https://github.com/ssoroka/rand) - Generate random numbers From 4321f8ae67a95f152aa5e83074e1e06f588811c1 Mon Sep 17 00:00:00 2001 From: xavpaice Date: Tue, 12 Oct 2021 14:31:33 +1300 Subject: [PATCH 124/176] fix: patched intel rdt to allow sudo (#9527) Co-authored-by: Joe Guo --- plugins/inputs/intel_rdt/README.md | 27 +++++++++++++++++++ plugins/inputs/intel_rdt/intel_rdt.go | 38 +++++++++++++++++++++++---- 2 files changed, 60 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/intel_rdt/README.md b/plugins/inputs/intel_rdt/README.md index 8a0f0a1ea6e75..cc98c13b6c0e0 100644 --- a/plugins/inputs/intel_rdt/README.md +++ b/plugins/inputs/intel_rdt/README.md @@ -24,6 +24,29 @@ Note: pqos tool needs root privileges to work properly. Metrics will be constantly reported from the following `pqos` commands within the given interval: +#### If telegraf does not run as the root user + +The `pqos` binary needs to run as root. If telegraf is running as a non-root user, you may enable sudo +to allow `pqos` to run correctly. +The `pqos` command requires root level access to run. There are two options to +overcome this if you run telegraf as a non-root user. + +It is possible to update the pqos binary with setuid using `chmod u+s +/path/to/pqos`. 
This approach is simple and requires no modification to the +Telegraf configuration, however pqos is not a read-only tool and there are +security implications for making such a command setuid root. + +Alternately, you may enable sudo to allow `pqos` to run correctly, as follows: + +Add the following to your sudoers file (assumes telegraf runs as a user named `telegraf`): + +``` +telegraf ALL=(ALL) NOPASSWD:/usr/sbin/pqos -r --iface-os --mon-file-type=csv --mon-interval=* +``` + +If you wish to use sudo, you must also add `use_sudo = true` to the Telegraf +configuration (see below). + #### In case of cores monitoring: ``` pqos -r --iface-os --mon-file-type=csv --mon-interval=INTERVAL --mon-core=all:[CORES]\;mbt:[CORES] @@ -76,6 +99,10 @@ More about Intel RDT: https://www.intel.com/content/www/us/en/architecture-and-t ## Mandatory if cores aren't set and forbidden if cores are specified. ## e.g. ["qemu", "pmd"] # processes = ["process"] + + ## Specify if the pqos process should be called with sudo. + ## Mandatory if the telegraf process does not run as root. + # use_sudo = false ``` ### Exposed metrics diff --git a/plugins/inputs/intel_rdt/intel_rdt.go b/plugins/inputs/intel_rdt/intel_rdt.go index e0c7de526b067..486a13c98c535 100644 --- a/plugins/inputs/intel_rdt/intel_rdt.go +++ b/plugins/inputs/intel_rdt/intel_rdt.go @@ -14,6 +14,7 @@ import ( "strconv" "strings" "sync" + "syscall" "time" "github.com/google/go-cmp/cmp" @@ -46,6 +47,7 @@ type IntelRDT struct { Processes []string `toml:"processes"` SamplingInterval int32 `toml:"sampling_interval"` ShortenedMetrics bool `toml:"shortened_metrics"` + UseSudo bool `toml:"use_sudo"` Log telegraf.Logger `toml:"-"` Publisher Publisher `toml:"-"` @@ -97,6 +99,10 @@ func (r *IntelRDT) SampleConfig() string { ## Mandatory if cores aren't set and forbidden if cores are specified. ## e.g. ["qemu", "pmd"] # processes = ["process"] + + ## Specify if the pqos process should be called with sudo. 
+ ## Mandatory if the telegraf process does not run as root. + # use_sudo = false ` } @@ -254,6 +260,12 @@ func (r *IntelRDT) readData(ctx context.Context, args []string, processesPIDsAss cmd := exec.Command(r.PqosPath, append(args)...) + if r.UseSudo { + // run pqos with `/bin/sh -c "sudo /path/to/pqos ..."` + args = []string{"-c", fmt.Sprintf("sudo %s %s", r.PqosPath, strings.Replace(strings.Join(args, " "), ";", "\\;", -1))} + cmd = exec.Command("/bin/sh", args...) + } + cmdReader, err := cmd.StdoutPipe() if err != nil { r.errorChan <- err @@ -334,14 +346,30 @@ func (r *IntelRDT) processOutput(cmdReader io.ReadCloser, processesPIDsAssociati } func shutDownPqos(pqos *exec.Cmd) error { + timeout := time.Second * 2 + if pqos.Process != nil { - err := pqos.Process.Signal(os.Interrupt) - if err != nil { - err = pqos.Process.Kill() - if err != nil { - return fmt.Errorf("failed to shut down pqos: %v", err) + // try to send interrupt signal, ignore err for now + _ = pqos.Process.Signal(os.Interrupt) + + // wait and constantly check if pqos is still running + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + for { + if err := pqos.Process.Signal(syscall.Signal(0)); err == os.ErrProcessDone { + return nil + } else if ctx.Err() != nil { + break } } + + // if pqos is still running after some period, try to kill it + // this will send SIGTERM to pqos, and leave garbage in `/sys/fs/resctrl/mon_groups` + // fixed in https://github.com/intel/intel-cmt-cat/issues/197 + err := pqos.Process.Kill() + if err != nil { + return fmt.Errorf("failed to shut down pqos: %v", err) + } } return nil } From 15d868586dc2faa6f527bea1e8d2efe247b02fb8 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Tue, 12 Oct 2021 18:42:25 +0200 Subject: [PATCH 125/176] Fix: Update snowflake database driver to 1.6.2 (#9866) --- docs/LICENSE_OF_DEPENDENCIES.md | 1 + go.mod | 50 +++++------ go.sum | 147 +++++++++++++++++++++++--------- 
3 files changed, 132 insertions(+), 66 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 34a54d2d2fde9..c52b110b28fd8 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -38,6 +38,7 @@ following works: - github.com/aws/aws-sdk-go-v2/credentials [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/credentials/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/feature/ec2/imds [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/feature/ec2/imds/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/feature/s3/manager [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/internal/ini [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/ec2/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/service/ec2 [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/ec2/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/internal/accept-encoding/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/internal/presigned-url/LICENSE.txt) diff --git a/go.mod b/go.mod index bcb5f9af3a7af..af89256872a8e 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/Azure/azure-kusto-go v0.3.2 github.com/Azure/azure-pipeline-go v0.2.3 // indirect github.com/Azure/azure-sdk-for-go v52.5.0+incompatible // indirect - github.com/Azure/azure-storage-blob-go v0.13.0 // indirect + github.com/Azure/azure-storage-blob-go v0.14.0 // indirect github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd github.com/Azure/go-amqp v0.13.12 // indirect github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect @@ -42,24 +42,24 @@ require ( github.com/antchfx/jsonquery v1.1.4 
github.com/antchfx/xmlquery v1.3.6 github.com/antchfx/xpath v1.1.11 - github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230 // indirect + github.com/apache/arrow/go/arrow v0.0.0-20211006091945-a69884db78f4 // indirect github.com/apache/thrift v0.14.2 github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 // indirect github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 github.com/armon/go-metrics v0.3.3 // indirect github.com/aws/aws-sdk-go v1.38.69 - github.com/aws/aws-sdk-go-v2 v1.3.2 - github.com/aws/aws-sdk-go-v2/config v1.1.5 - github.com/aws/aws-sdk-go-v2/credentials v1.1.5 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.6 - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.1.2 // indirect + github.com/aws/aws-sdk-go-v2 v1.9.1 + github.com/aws/aws-sdk-go-v2/config v1.8.2 + github.com/aws/aws-sdk-go-v2/credentials v1.4.2 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.1 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.3 // indirect github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0 - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.0.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.6 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.2.2 // indirect - github.com/aws/aws-sdk-go-v2/service/s3 v1.5.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.1.5 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.2.2 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.1 // indirect + github.com/aws/aws-sdk-go-v2/service/s3 v1.16.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.4.1 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.7.1 // indirect github.com/aws/smithy-go v1.8.0 github.com/benbjohnson/clock v1.1.0 
github.com/beorn7/perks v1.0.1 // indirect @@ -93,7 +93,7 @@ require ( github.com/echlebek/timeproxy v1.0.0 // indirect github.com/eclipse/paho.mqtt.golang v1.3.0 github.com/fatih/color v1.10.0 // indirect - github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect + github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 github.com/go-logfmt/logfmt v0.5.0 github.com/go-logr/logr v0.4.0 // indirect @@ -112,13 +112,13 @@ require ( github.com/golang/geo v0.0.0-20190916061304-5b978397cfec github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 - github.com/golang/snappy v0.0.3 - github.com/google/flatbuffers v1.12.0 // indirect + github.com/golang/snappy v0.0.4 + github.com/google/flatbuffers v2.0.0+incompatible // indirect github.com/google/go-cmp v0.5.6 github.com/google/go-github/v32 v32.1.0 github.com/google/go-querystring v1.0.0 // indirect github.com/google/gofuzz v1.1.0 // indirect - github.com/google/uuid v1.2.0 // indirect + github.com/google/uuid v1.3.0 // indirect github.com/googleapis/gax-go/v2 v2.0.5 // indirect github.com/googleapis/gnostic v0.5.5 // indirect github.com/gopcua/opcua v0.2.0-rc2.0.20210409063412-baabb9b14fd2 @@ -164,7 +164,7 @@ require ( github.com/kardianos/service v1.0.0 github.com/karrick/godirwalk v1.16.1 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 - github.com/klauspost/compress v1.13.4 // indirect + github.com/klauspost/compress v1.13.6 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/kr/text v0.2.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect @@ -212,7 +212,7 @@ require ( github.com/pion/logging v0.2.2 // indirect github.com/pion/transport v0.12.3 // indirect github.com/pion/udp v0.1.1 // indirect - github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4 // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect 
github.com/pkg/errors v0.9.1 github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_golang v1.11.0 @@ -238,7 +238,7 @@ require ( github.com/signalfx/sapm-proto v0.7.2 // indirect github.com/sirupsen/logrus v1.8.1 github.com/sleepinggenius2/gosmi v0.4.3 - github.com/snowflakedb/gosnowflake v1.5.0 + github.com/snowflakedb/gosnowflake v1.6.2 github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 github.com/stretchr/objx v0.2.0 // indirect github.com/stretchr/testify v1.7.0 @@ -275,14 +275,14 @@ require ( go.starlark.net v0.0.0-20210406145628-7a1108eaa012 go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.6.0 // indirect - golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e // indirect + golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 // indirect golang.org/x/mod v0.4.2 // indirect - golang.org/x/net v0.0.0-20210614182718-04defd469f4e + golang.org/x/net v0.0.0-20211005215030-d2e5035098b3 golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71 + golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d // indirect - golang.org/x/text v0.3.6 + golang.org/x/text v0.3.7 golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect golang.org/x/tools v0.1.5 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect @@ -327,12 +327,14 @@ require ( ) require ( + github.com/aws/aws-sdk-go-v2/internal/ini v1.2.3 // indirect github.com/cenkalti/backoff/v4 v4.1.1 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect github.com/jcmturner/gokrb5/v8 v8.4.2 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/josharian/intern v1.0.0 // indirect + github.com/pierrec/lz4/v4 v4.1.8 // indirect github.com/rogpeppe/go-internal v1.6.2 // indirect go.opentelemetry.io/otel v1.0.0-RC3 // indirect 
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.23.0 // indirect diff --git a/go.sum b/go.sum index de38213b94a7b..f4bd2822edf1c 100644 --- a/go.sum +++ b/go.sum @@ -64,6 +64,7 @@ collectd.org v0.5.0 h1:y4uFSAuOmeVhG3GCRa3/oH+ysePfO/+eGJNfd0Qa3d8= collectd.org v0.5.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= contrib.go.opencensus.io/exporter/prometheus v0.3.0/go.mod h1:rpCPVQKhiyH8oomWgm34ZmgIdZa8OVYO5WAIygPbBBE= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/Azure/azure-amqp-common-go/v3 v3.0.1 h1:mXh+eyOxGLBfqDtfmbtby0l7XfG/6b2NkuZ3B7i6zHA= github.com/Azure/azure-amqp-common-go/v3 v3.0.1/go.mod h1:PBIGdzcO1teYoufTKMcGibdKaYZv4avS+O6LNIp8bq0= @@ -83,8 +84,8 @@ github.com/Azure/azure-sdk-for-go v52.5.0+incompatible h1:/NLBWHCnIHtZyLPc1P7WIq github.com/Azure/azure-sdk-for-go v52.5.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y= github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= -github.com/Azure/azure-storage-blob-go v0.13.0 h1:lgWHvFh+UYBNVQLFHXkvul2f6yOPA9PIH82RTG2cSwc= -github.com/Azure/azure-storage-blob-go v0.13.0/go.mod h1:pA9kNqtjUeQF2zOSu4s//nUdBD+e64lEuc4sVnuOfNs= +github.com/Azure/azure-storage-blob-go v0.14.0 h1:1BCg74AmVdYwO3dlKwtFU1V0wU2PZdREkXvAmZJRUlM= +github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck= github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd h1:b3wyxBl3vvr15tUAziPBPK354y+LSdfPCpex5oBttHo= github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd/go.mod 
h1:K6am8mT+5iFXgingS9LUc7TmbsW6XBw3nxaRyaMyWc8= github.com/Azure/go-amqp v0.13.0/go.mod h1:qj+o8xPCz9tMSbQ83Vp8boHahuRDl5mkNHyt1xlxUTs= @@ -109,7 +110,6 @@ github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= -github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/adal v0.9.11/go.mod h1:nBKAnTomx8gDtl+3ZCJv2v0KACFHWTB2drffI1B68Pk= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= @@ -249,8 +249,9 @@ github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQY github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antonmedv/expr v1.8.9/go.mod h1:5qsM3oLGDND7sDmQGDXHkYfkjYMUX14qsgqmHhwGEk8= github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= -github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230 h1:5ultmol0yeX75oh1hY78uAFn3dupBQ/QUNxERCkiaUQ= -github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0= +github.com/apache/arrow/go/arrow v0.0.0-20210818145353-234c94e4ce64/go.mod h1:2qMFB56yOP3KzkB3PbYZ4AlUFg3a88F67TIx5lB/WwY= +github.com/apache/arrow/go/arrow v0.0.0-20211006091945-a69884db78f4 h1:nPUln5QTzhftSpmld3xcXw/GOJ3z1E8fR8tUrrc0YWk= +github.com/apache/arrow/go/arrow v0.0.0-20211006091945-a69884db78f4/go.mod h1:Q7yQnSMnLvcXlZ8RV+jwz/6y1rQTqbX6C82SndT52Zs= 
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.14.1/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= @@ -285,33 +286,47 @@ github.com/aws/aws-sdk-go v1.38.69 h1:V489lmrdkIQSfF6OAGZZ1Cavcm7eczCm2JcGvX+yHR github.com/aws/aws-sdk-go v1.38.69/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.1.0/go.mod h1:smfAbmpW+tcRVuNUjo3MOArSZmW72t62rkCzc2i0TWM= -github.com/aws/aws-sdk-go-v2 v1.3.2 h1:RQj8l98yKUm0UV2Wd3w/Ms+TXV9Rs1E6Kr5tRRMfyU4= -github.com/aws/aws-sdk-go-v2 v1.3.2/go.mod h1:7OaACgj2SX3XGWnrIjGlJM22h6yD6MEWKvm7levnnM8= -github.com/aws/aws-sdk-go-v2/config v1.1.5 h1:imDWOGwlIrRpHLallJ9mli2SIQ4egtGKtFUFsuGRIaQ= -github.com/aws/aws-sdk-go-v2/config v1.1.5/go.mod h1:P3F1hku7qzC81txjwXnwOM6Ex6ezkU6+/557Teyb64E= -github.com/aws/aws-sdk-go-v2/credentials v1.1.5 h1:R9v/eN5cXv5yMLC619xRYl5PgCSuy5SarizmM7+qqSA= -github.com/aws/aws-sdk-go-v2/credentials v1.1.5/go.mod h1:Ir1R6tPiR1/2y1hes8yOijFMz54hzSmgcmCDo6F45Qc= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.6 h1:zoOz5V56jO/rGixsCDnrQtAzYRYM2hGA/43U6jVMFbo= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.6/go.mod h1:0+fWMitrmIpENiY8/1DyhdYPUCAPvd9UNz9mtCsEoLQ= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.1.2 h1:Doa5wabOIDA0XZzBX5yCTAPGwDCVZ8Ux0wh29AUDmN4= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.1.2/go.mod h1:Azf567f5wBUfUbwpyJJnLM/geFFIzEulGR30L+nQZOE= +github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= +github.com/aws/aws-sdk-go-v2 v1.9.1 h1:ZbovGV/qo40nrOJ4q8G33AGICzaPI45FHQWJ9650pF4= +github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= +github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod 
h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU= +github.com/aws/aws-sdk-go-v2/config v1.8.2 h1:Dqy4ySXFmulRmZhfynm/5CD4Y6aXiTVhDtXLIuUe/r0= +github.com/aws/aws-sdk-go-v2/config v1.8.2/go.mod h1:r0bkX9NyuCuf28qVcsEMtpAQibT7gA1Q0gzkjvgJdLU= +github.com/aws/aws-sdk-go-v2/credentials v1.3.2/go.mod h1:PACKuTJdt6AlXvEq8rFI4eDmoqDFC5DpVKQbWysaDgM= +github.com/aws/aws-sdk-go-v2/credentials v1.4.2 h1:8kVE4Og6wlhVrMGiORQ3p9gRj2exjzhFRB+QzWBUa5Q= +github.com/aws/aws-sdk-go-v2/credentials v1.4.2/go.mod h1:9Sp6u121/f0NnvHyhG7dgoYeUTEFC2vsvJqJ6wXpkaI= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0/go.mod h1:Mj/U8OpDbcVcoctrYwA2bak8k/HFPdcLzI/vaiXMwuM= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.1 h1:Nm+BxqBtT0r+AnD6byGMCGT4Km0QwHBy8mAYptNPXY4= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.1/go.mod h1:W1ldHfsgeGlKpJ4xZMKZUI6Wmp6EAstU7PxnhbXWWrI= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.4.0/go.mod h1:eHwXu2+uE/T6gpnYWwBwqoeqRf9IXyCcolyOWDRAErQ= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.3 h1:0O72494cCsazjpsGfo+LXezru6PMSp0HUB1m5UfpaRU= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.3/go.mod h1:claNkz2j/N/AZceFcAbR0NyuWnrn+jCYpI+6Ozjsc0k= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.3 h1:NnXJXUz7oihrSlPKEM0yZ19b+7GQ47MX/LluLlEyE/Y= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.3/go.mod h1:EES9ToeC3h063zCFDdqWGnARExNdULPaBvARm1FLwxA= github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0 h1:+VnEgB1yp+7KlOsk6FXX/v/fU9uL5oSujIMkKQBBmp8= github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0/go.mod h1:/6514fU/SRcY3+ousB1zjUqiXjruSuti2qcfE70osOc= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.0.4 h1:8yeByqOL6UWBsOOXsHnW93/ukwL66O008tRfxXxnTwA= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.0.4/go.mod h1:BCfU3Uo2fhKcMZFp9zU5QQGQxqWCOYmZ/27Dju3S/do= 
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.2.2/go.mod h1:EASdTcM1lGhUe1/p4gkojHwlGJkeoRjjr1sRCzup3Is= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0 h1:gceOysEWNNwLd6cki65IMBZ4WAM0MwgBQq2n7kejoT8= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0/go.mod h1:v8ygadNyATSm6elwJ/4gzJwcFhri9RqS8skgHKiwXPU= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.1/go.mod h1:PISaKWylTYAyruocNk4Lr9miOOJjOcVBd7twCPbydDk= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.6 h1:ldYIsOP4WyjdzW8t6RC/aSieajrlx+3UN3UCZy1KM5Y= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.6/go.mod h1:L0KWr0ASo83PRZu9NaZaDsw3koS6PspKv137DMDZjHo= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.2.2 h1:aU8H58DoYxNo8R1TaSPTofkuxfQNnoqZmWL+G3+k/vA= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.2.2/go.mod h1:nnutjMLuna0s3GVY/MAkpLX03thyNER06gXvnMAPj5g= -github.com/aws/aws-sdk-go-v2/service/s3 v1.5.0 h1:VbwXUI3L0hyhVmrFxbDxrs6cBX8TNFX0YxCpooMNjvY= -github.com/aws/aws-sdk-go-v2/service/s3 v1.5.0/go.mod h1:uwA7gs93Qcss43astPUb1eq4RyceNmYWAQjZFDOAMLo= -github.com/aws/aws-sdk-go-v2/service/sso v1.1.5 h1:B7ec5wE4+3Ldkurmq0C4gfQFtElGTG+/iTpi/YPMzi4= -github.com/aws/aws-sdk-go-v2/service/sso v1.1.5/go.mod h1:bpGz0tidC4y39sZkQSkpO/J0tzWCMXHbw6FZ0j1GkWM= -github.com/aws/aws-sdk-go-v2/service/sts v1.2.2 h1:fKw6QSGcFlvZCBPYx3fo4sL0HfTmaT06ZtMHJfQQNQQ= -github.com/aws/aws-sdk-go-v2/service/sts v1.2.2/go.mod h1:ssRzzJ2RZOVuKj2Vx1YE7ypfil/BIlgmQnCSW4DistU= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2/go.mod h1:NXmNI41bdEsJMrD0v9rUvbGCB5GwdBEpKvUvIY3vTFg= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.1 h1:APEjhKZLFlNVLATnA/TJyA+w1r/xd5r5ACWBDZ9aIvc= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.1/go.mod h1:Ve+eJOx9UWaT/lMVebnFhDhO49fSLVedHoA82+Rqme0= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.5.2/go.mod 
h1:QuL2Ym8BkrLmN4lUofXYq6000/i5jPjosCNK//t6gak= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.1 h1:YEz2KMyqK2zyG3uOa0l2xBc/H6NUVJir8FhwHQHF3rc= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.1/go.mod h1:yg4EN/BKoc7+DLhNOxxdvoO3+iyW2FuynvaKqLcLDUM= +github.com/aws/aws-sdk-go-v2/service/s3 v1.12.0/go.mod h1:6J++A5xpo7QDsIeSqPK4UHqMSyPOCopa+zKtqAMhqVQ= +github.com/aws/aws-sdk-go-v2/service/s3 v1.16.0 h1:dt1JQFj/135ozwGIWeCM3aQ8N/kB3Xu3Uu4r9zuOIyc= +github.com/aws/aws-sdk-go-v2/service/s3 v1.16.0/go.mod h1:Tk23mCmfL3wb3tNIeMk/0diUZ0W4R6uZtjYKguMLW2s= +github.com/aws/aws-sdk-go-v2/service/sso v1.3.2/go.mod h1:J21I6kF+d/6XHVk7kp/cx9YVD2TMD2TbLwtRGVcinXo= +github.com/aws/aws-sdk-go-v2/service/sso v1.4.1 h1:RfgQyv3bFT2Js6XokcrNtTjQ6wAVBRpoCgTFsypihHA= +github.com/aws/aws-sdk-go-v2/service/sso v1.4.1/go.mod h1:ycPdbJZlM0BLhuBnd80WX9PucWPG88qps/2jl9HugXs= +github.com/aws/aws-sdk-go-v2/service/sts v1.6.1/go.mod h1:hLZ/AnkIKHLuPGjEiyghNEdvJ2PP0MgOxcmv9EBJ4xs= +github.com/aws/aws-sdk-go-v2/service/sts v1.7.1 h1:7ce9ugapSgBapwLhg7AJTqKW5U92VRX3vX65k2tsB+g= +github.com/aws/aws-sdk-go-v2/service/sts v1.7.1/go.mod h1:r1i8QwKPzwByXqZb3POQfBs7jozrdnHz8PVbsvyx73w= github.com/aws/smithy-go v1.0.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw= -github.com/aws/smithy-go v1.3.1/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aws/smithy-go v1.8.0 h1:AEwwwXQZtUwP5Mz506FeXXrKBe0jA8gVM+1gEcSRooc= github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= @@ -339,6 +354,7 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dR github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= github.com/boltdb/bolt v1.3.1/go.mod 
h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/bombsimon/wsl/v3 v3.2.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= +github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/bsm/sarama-cluster v2.1.13+incompatible/go.mod h1:r7ao+4tTNXvWm+VRpRJchr2kQhqxgmAp2iEX5W96gMM= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= @@ -639,9 +655,11 @@ github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGE github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/form3tech-oss/jwt-go v3.2.5+incompatible h1:/l4kBbb4/vGSsdtB5nUe8L7B9mImVMaBPw9L/0TBHU8= +github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= @@ -669,6 +687,10 @@ github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0 github.com/glycerine/go-unsnap-stream 
v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/go-critic/go-critic v0.5.4/go.mod h1:cjB4YGw+n/+X8gREApej7150Uyy1Tg8If6F2XOAUXNE= +github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= +github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= +github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -678,6 +700,7 @@ github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= @@ -906,8 +929,9 @@ github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy 
v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= @@ -924,8 +948,10 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= -github.com/google/flatbuffers v1.12.0 h1:/PtAHvnBY4Kqnx/xCQ3OIV9uYcSFGScBsWI3Oogeh6w= +github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/flatbuffers v1.12.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v2.0.0+incompatible h1:dicJ2oXwypfwUGnB2/TYWYEKiuk9eYQlQO/AnOHl5mI= +github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -971,8 +997,9 @@ 
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4 github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= @@ -1241,6 +1268,7 @@ github.com/juju/testing v0.0.0-20191001232224-ce9dec17d28b/go.mod h1:63prj8cnj0t github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/julz/importas v0.0.0-20210226073942-60b4fa260dd0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= +github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= github.com/kardianos/service v1.0.0 h1:HgQS3mFfOlyntWX8Oke98JcJLqt1DBcHR4kxShpYef0= @@ -1266,8 +1294,11 @@ github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdY github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress 
v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.13.4 h1:0zhec2I8zGnjWcKyLl6i3gPqKANCCn5e9xmviEEeX6s= +github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= @@ -1598,12 +1629,18 @@ github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7 github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ= github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= +github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod 
h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A= github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4/v4 v4.1.8 h1:ieHkV+i2BRzngO4Wd/3HGowuZStgq6QkPsD1eolNAO4= +github.com/pierrec/lz4/v4 v4.1.8/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pion/dtls/v2 v2.0.9 h1:7Ow+V++YSZQMYzggI0P9vLJz/hUFcffsfGMfT/Qy+u8= github.com/pion/dtls/v2 v2.0.9/go.mod h1:O0Wr7si/Zj5/EBFlDzDd6UtVxx25CE1r7XM7BQKYQho= github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= @@ -1613,8 +1650,9 @@ github.com/pion/transport v0.12.3 h1:vdBfvfU/0Wq8kd2yhUMSDB/x+O4Z9MYVl2fJ5BT4JZw github.com/pion/transport v0.12.3/go.mod h1:OViWW9SP2peE/HbwBvARicmAVnesphkNkCVZIWJ6q9A= github.com/pion/udp v0.1.1 h1:8UAPvyqmsxK8oOjloDk4wUt63TzFe9WEJkg5lChlj7o= github.com/pion/udp v0.1.1/go.mod h1:6AFo+CMdKQm7UiA0eUPA8/eVCTx8jBIITLZHc9DWX5M= -github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4 h1:49lOXmGaUpV9Fz3gd7TFZY106KVlPVa5jcYD1gaQf98= -github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= +github.com/pkg/browser v0.0.0-20210706143420-7d21f8c997e2/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1730,6 +1768,7 @@ github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OK github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= github.com/ryancurrah/gomodguard v1.2.0/go.mod h1:rNqbC4TOIdUDcVMSIpNNAzTbzXAZa6W5lnUepvuMMgQ= github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -1799,8 +1838,8 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1 github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/snowflakedb/gosnowflake v1.5.0 h1:Md7P8zbPegXy0+/SZ2nG8whXYkAT44nQ/yEb35LlIKo= -github.com/snowflakedb/gosnowflake v1.5.0/go.mod h1:1kyg2XEduwti88V11PKRHImhXLK5WpGiayY6lFNYb98= +github.com/snowflakedb/gosnowflake v1.6.2 h1:drZkX7Ve3qr3lLD/f0vxwesgJZfNerivknAvPRAMy88= +github.com/snowflakedb/gosnowflake v1.6.2/go.mod h1:k1Wq+O8dRD/jmFBLyStEv2OrgHoMFQpqHCRSy70P0dI= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI= @@ -2094,8 +2133,10 @@ golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod 
h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= -golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e h1:gsTQYXdTw2Gq7RBsWvlQ91b+aEQ6bXFUngBGuR8sPpI= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2103,16 +2144,24 @@ golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp 
v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5 h1:FR+oGxGfbQu1d+jglI3rCkjAjUnhRSZcUxr+DqlDLNo= golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -2217,8 +2266,12 @@ golang.org/x/net 
v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q= +golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211005215030-d2e5035098b3 h1:G64nFNerDErBd2KdvHvIn3Ee6ccUQBTfhDZEO0DccfU= +golang.org/x/net v0.0.0-20211005215030-d2e5035098b3/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2359,6 +2412,7 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210314195730-07df6a141424/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2379,11 +2433,14 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210611083646-a4fc73990273/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71 h1:ikCpsnYR+Ew0vu99XlDp55lGgDJdIMx3f4a18jfse/s= golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210818153620-00dd8d7831e7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef h1:fPxZ3Umkct3LZ8gK9nbk+DWDJ9fstZa2grBn+lWVKPs= +golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d 
h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= @@ -2395,8 +2452,9 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2445,6 +2503,7 @@ golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190916130336-e45ffcd953cc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -2534,11 +2593,14 @@ golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4/go.mod h1:U 
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= -gonum.org/v1/gonum v0.7.0 h1:Hdks0L0hgznZLG9nzXb8vZ0rRvqNvAcgAp84y7Mwkgw= gonum.org/v1/gonum v0.7.0/go.mod h1:L02bwd0sqlsvRv41G7wGWFCsVNZFv/k1xzGIxeANHGM= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.9.3 h1:DnoIG+QAMaF5NvxnGe/oKsgKcAc6PcUyl8q0VetfQ8s= +gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= @@ -2636,6 +2698,7 @@ google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxH google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210630183607-d20f26d13c79/go.mod h1:yiaVoXHpRzHGyxV3o4DktVWY4mSUErTKaeEOq6C3t3U= google.golang.org/genproto 
v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= From 66da86017fcc3caf233b1dd82a9bc7b7188d3a9e Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 12 Oct 2021 16:04:36 -0500 Subject: [PATCH 126/176] chore: json_v2 read testdata dir for tests (#9906) --- plugins/parsers/json_v2/parser_test.go | 102 +++---------------------- 1 file changed, 11 insertions(+), 91 deletions(-) diff --git a/plugins/parsers/json_v2/parser_test.go b/plugins/parsers/json_v2/parser_test.go index 686bf826ad9d7..3de93dc22b49f 100644 --- a/plugins/parsers/json_v2/parser_test.go +++ b/plugins/parsers/json_v2/parser_test.go @@ -3,6 +3,7 @@ package json_v2_test import ( "bufio" "fmt" + "io/ioutil" "os" "testing" @@ -15,97 +16,17 @@ import ( "github.com/stretchr/testify/require" ) -func TestData(t *testing.T) { - var tests = []struct { - name string - test string - }{ - { - name: "Test when using field and object together", - test: "mix_field_and_object", - }, - { - name: "Test complex nesting", - test: "complex_nesting", - }, - { - name: "Test having an array of objects", - test: "array_of_objects", - }, - { - name: "Test having multiple JSON inputs", - test: "multiple_json_input", - }, - { - name: "A second test when selecting with sub field and tags", - test: "subfieldtag_in_object_2", - }, - { - name: "Test selecting with sub field and tags", - test: "subfieldtag_in_object", - }, - { - name: "Test using just fields and tags", - test: "fields_and_tags", - }, - { - name: "Test gathering from array of nested objects", - test: "nested_array_of_objects", - }, - { - name: "Test setting timestamp", - test: "timestamp", - }, - { - name: "Test setting measurement name from int", - 
test: "measurement_name_int", - }, - { - name: "Test multiple types", - test: "types", - }, - { - name: "Test settings tags in nested object", - test: "nested_tags", - }, - { - name: "Test settings tags in nested and non-nested objects", - test: "nested_and_nonnested_tags", - }, - { - name: "Test a more complex nested tag retrieval", - test: "nested_tags_complex", - }, - { - name: "Test multiple arrays in object", - test: "multiple_arrays_in_object", - }, - { - name: "Test fields and tags complex", - test: "fields_and_tags_complex", - }, - { - name: "Test object", - test: "object", - }, - { - name: "Test multiple timestamps", - test: "multiple_timestamps", - }, - { - name: "Test field with null", - test: "null", - }, - { - name: "Test large numbers (int64, uin64, float64)", - test: "large_numbers", - }, - } +func TestMultipleConfigs(t *testing.T) { + // Get all directories in testdata + folders, err := ioutil.ReadDir("testdata") + require.NoError(t, err) + // Make sure testdata contains data + require.Greater(t, len(folders), 0) - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { + for _, f := range folders { + t.Run(f.Name(), func(t *testing.T) { // Process the telegraf config file for the test - buf, err := os.ReadFile(fmt.Sprintf("testdata/%s/telegraf.conf", tc.test)) + buf, err := os.ReadFile(fmt.Sprintf("testdata/%s/telegraf.conf", f.Name())) require.NoError(t, err) inputs.Add("file", func() telegraf.Input { return &file.File{} @@ -122,10 +43,9 @@ func TestData(t *testing.T) { err = i.Gather(&acc) require.NoError(t, err) } - require.NoError(t, err) // Process expected metrics and compare with resulting metrics - expectedOutputs, err := readMetricFile(fmt.Sprintf("testdata/%s/expected.out", tc.test)) + expectedOutputs, err := readMetricFile(fmt.Sprintf("testdata/%s/expected.out", f.Name())) require.NoError(t, err) testutil.RequireMetricsEqual(t, expectedOutputs, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) }) From 
0be92db8af40ea69d6174fd85738c7b403386241 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 12 Oct 2021 16:07:34 -0500 Subject: [PATCH 127/176] docs: json_v2 improved var naming and comments (#9907) --- plugins/parsers/json_v2/parser.go | 103 ++++++++++++++++-------------- 1 file changed, 55 insertions(+), 48 deletions(-) diff --git a/plugins/parsers/json_v2/parser.go b/plugins/parsers/json_v2/parser.go index ebeb6545ba549..c0570fddd1e7d 100644 --- a/plugins/parsers/json_v2/parser.go +++ b/plugins/parsers/json_v2/parser.go @@ -12,19 +12,27 @@ import ( "github.com/tidwall/gjson" ) +// Parser adheres to the parser interface, contains the parser configuration, and data required to parse JSON type Parser struct { - InputJSON []byte + // These struct fields are common for a parser Configs []Config DefaultTags map[string]string Log telegraf.Logger - Timestamp time.Time + // **** The struct fields bellow this comment are used for processing indvidual configs **** + + // measurementName is the the name of the current config used in each line protocol measurementName string + // timestamp is the timestamp used in each line protocol, defaults to time.Now() + timestamp time.Time + // **** Specific for object configuration **** + // subPathResults contains the results of sub-gjson path expressions provided in fields/tags table within object config + subPathResults []PathResult + // iterateObjects dictates if ExpandArray function will handle objects iterateObjects bool - - currentSettings JSONObject - pathResults []PathResult + // objectConfig contains the config for an object, some info is needed while iterating over the gjson results + objectConfig JSONObject } type PathResult struct { @@ -83,28 +91,27 @@ type MetricNode struct { } func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { - p.InputJSON = input // Only valid JSON is supported - if !gjson.Valid(string(p.InputJSON)) { - return nil, fmt.Errorf("Invalid 
JSON provided, unable to parse") + if !gjson.Valid(string(input)) { + return nil, fmt.Errorf("invalid JSON provided, unable to parse") } var metrics []telegraf.Metric for _, c := range p.Configs { - // Measurement name configuration + // Measurement name can either be hardcoded, or parsed from the JSON using a GJSON path expression p.measurementName = c.MeasurementName if c.MeasurementNamePath != "" { - result := gjson.GetBytes(p.InputJSON, c.MeasurementNamePath) + result := gjson.GetBytes(input, c.MeasurementNamePath) if !result.IsArray() && !result.IsObject() { p.measurementName = result.String() } } - // Timestamp configuration - p.Timestamp = time.Now() + // timestamp defaults to current time, or can be parsed from the JSON using a GJSON path expression + p.timestamp = time.Now() if c.TimestampPath != "" { - result := gjson.GetBytes(p.InputJSON, c.TimestampPath) + result := gjson.GetBytes(input, c.TimestampPath) if !result.IsArray() && !result.IsObject() { if c.TimestampFormat == "" { err := fmt.Errorf("use of 'timestamp_query' requires 'timestamp_format'") @@ -112,24 +119,24 @@ func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { } var err error - p.Timestamp, err = internal.ParseTimestamp(c.TimestampFormat, result.Value(), c.TimestampTimezone) + p.timestamp, err = internal.ParseTimestamp(c.TimestampFormat, result.Value(), c.TimestampTimezone) if err != nil { return nil, err } } } - fields, err := p.processMetric(c.Fields, false) + fields, err := p.processMetric(input, c.Fields, false) if err != nil { return nil, err } - tags, err := p.processMetric(c.Tags, true) + tags, err := p.processMetric(input, c.Tags, true) if err != nil { return nil, err } - objects, err := p.processObjects(c.JSONObjects) + objects, err := p.processObjects(input, c.JSONObjects) if err != nil { return nil, err } @@ -155,7 +162,7 @@ func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { // processMetric will iterate over all 'field' or 'tag' configs and create 
metrics for each // A field/tag can either be a single value or an array of values, each resulting in its own metric // For multiple configs, a set of metrics is created from the cartesian product of each separate config -func (p *Parser) processMetric(data []DataSet, tag bool) ([]telegraf.Metric, error) { +func (p *Parser) processMetric(input []byte, data []DataSet, tag bool) ([]telegraf.Metric, error) { if len(data) == 0 { return nil, nil } @@ -167,7 +174,7 @@ func (p *Parser) processMetric(data []DataSet, tag bool) ([]telegraf.Metric, err if c.Path == "" { return nil, fmt.Errorf("GJSON path is required") } - result := gjson.GetBytes(p.InputJSON, c.Path) + result := gjson.GetBytes(input, c.Path) if result.IsObject() { p.Log.Debugf("Found object in the path: %s, ignoring it please use 'object' to gather metrics from objects", c.Path) @@ -191,7 +198,7 @@ func (p *Parser) processMetric(data []DataSet, tag bool) ([]telegraf.Metric, err p.measurementName, map[string]string{}, map[string]interface{}{}, - p.Timestamp, + p.timestamp, ), Result: result, } @@ -251,7 +258,7 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { p.Log.Debugf("Found object in query ignoring it please use 'object' to gather metrics from objects") return results, nil } - if result.IncludeCollection == nil && (len(p.currentSettings.FieldPaths) > 0 || len(p.currentSettings.TagPaths) > 0) { + if result.IncludeCollection == nil && (len(p.objectConfig.FieldPaths) > 0 || len(p.objectConfig.TagPaths) > 0) { result.IncludeCollection = p.existsInpathResults(result.Index, result.Raw) } r, err := p.combineObject(result) @@ -264,7 +271,7 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { if result.IsArray() { var err error - if result.IncludeCollection == nil && (len(p.currentSettings.FieldPaths) > 0 || len(p.currentSettings.TagPaths) > 0) { + if result.IncludeCollection == nil && (len(p.objectConfig.FieldPaths) > 0 || len(p.objectConfig.TagPaths) > 0) 
{ result.IncludeCollection = p.existsInpathResults(result.Index, result.Raw) } result.ForEach(func(_, val gjson.Result) bool { @@ -272,7 +279,7 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { p.measurementName, map[string]string{}, map[string]interface{}{}, - p.Timestamp, + p.timestamp, ) if val.IsObject() { if p.iterateObjects { @@ -280,7 +287,7 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { n.ParentIndex += val.Index n.Metric = m n.Result = val - if n.IncludeCollection == nil && (len(p.currentSettings.FieldPaths) > 0 || len(p.currentSettings.TagPaths) > 0) { + if n.IncludeCollection == nil && (len(p.objectConfig.FieldPaths) > 0 || len(p.objectConfig.TagPaths) > 0) { n.IncludeCollection = p.existsInpathResults(n.Index, n.Raw) } r, err := p.combineObject(n) @@ -310,7 +317,7 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { n.ParentIndex += val.Index n.Metric = m n.Result = val - if n.IncludeCollection == nil && (len(p.currentSettings.FieldPaths) > 0 || len(p.currentSettings.TagPaths) > 0) { + if n.IncludeCollection == nil && (len(p.objectConfig.FieldPaths) > 0 || len(p.objectConfig.TagPaths) > 0) { n.IncludeCollection = p.existsInpathResults(n.Index, n.Raw) } r, err := p.expandArray(n) @@ -324,12 +331,12 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { return nil, err } } else { - if result.SetName == p.currentSettings.TimestampKey { - if p.currentSettings.TimestampFormat == "" { + if result.SetName == p.objectConfig.TimestampKey { + if p.objectConfig.TimestampFormat == "" { err := fmt.Errorf("use of 'timestamp_query' requires 'timestamp_format'") return nil, err } - timestamp, err := internal.ParseTimestamp(p.currentSettings.TimestampFormat, result.Value(), p.currentSettings.TimestampTimezone) + timestamp, err := internal.ParseTimestamp(p.objectConfig.TimestampFormat, result.Value(), p.objectConfig.TimestampTimezone) if err != nil { return 
nil, err } @@ -341,7 +348,7 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { outputName := result.OutputName desiredType := result.DesiredType - if len(p.currentSettings.FieldPaths) > 0 || len(p.currentSettings.TagPaths) > 0 { + if len(p.objectConfig.FieldPaths) > 0 || len(p.objectConfig.TagPaths) > 0 { var pathResult *PathResult // When IncludeCollection isn't nil, that means the current result is included in the collection. if result.IncludeCollection != nil { @@ -386,7 +393,7 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { } func (p *Parser) existsInpathResults(index int, raw string) *PathResult { - for _, f := range p.pathResults { + for _, f := range p.subPathResults { if f.result.Index == 0 { for _, i := range f.result.Indexes { if i == index { @@ -401,23 +408,23 @@ func (p *Parser) existsInpathResults(index int, raw string) *PathResult { } // processObjects will iterate over all 'object' configs and create metrics for each -func (p *Parser) processObjects(objects []JSONObject) ([]telegraf.Metric, error) { +func (p *Parser) processObjects(input []byte, objects []JSONObject) ([]telegraf.Metric, error) { p.iterateObjects = true var t []telegraf.Metric for _, c := range objects { - p.currentSettings = c + p.objectConfig = c if c.Path == "" { return nil, fmt.Errorf("GJSON path is required") } - result := gjson.GetBytes(p.InputJSON, c.Path) + result := gjson.GetBytes(input, c.Path) scopedJSON := []byte(result.Raw) for _, f := range c.FieldPaths { var r PathResult r.result = gjson.GetBytes(scopedJSON, f.Path) r.DataSet = f - p.pathResults = append(p.pathResults, r) + p.subPathResults = append(p.subPathResults, r) } for _, f := range c.TagPaths { @@ -425,7 +432,7 @@ func (p *Parser) processObjects(objects []JSONObject) ([]telegraf.Metric, error) r.result = gjson.GetBytes(scopedJSON, f.Path) r.DataSet = f r.tag = true - p.pathResults = append(p.pathResults, r) + p.subPathResults = append(p.subPathResults, 
r) } if result.Type == gjson.Null { @@ -438,7 +445,7 @@ func (p *Parser) processObjects(objects []JSONObject) ([]telegraf.Metric, error) p.measurementName, map[string]string{}, map[string]interface{}{}, - p.Timestamp, + p.timestamp, ), Result: result, } @@ -472,12 +479,12 @@ func (p *Parser) combineObject(result MetricNode) ([]telegraf.Metric, error) { } var outputName string - if p.currentSettings.DisablePrependKeys { + if p.objectConfig.DisablePrependKeys { outputName = strings.ReplaceAll(key.String(), " ", "_") } else { outputName = setName } - for k, n := range p.currentSettings.Renames { + for k, n := range p.objectConfig.Renames { if k == setName { outputName = n break @@ -490,7 +497,7 @@ func (p *Parser) combineObject(result MetricNode) ([]telegraf.Metric, error) { arrayNode.SetName = setName arrayNode.Result = val - for k, t := range p.currentSettings.Fields { + for k, t := range p.objectConfig.Fields { if setName == k { arrayNode.DesiredType = t break @@ -498,7 +505,7 @@ func (p *Parser) combineObject(result MetricNode) ([]telegraf.Metric, error) { } tag := false - for _, t := range p.currentSettings.Tags { + for _, t := range p.objectConfig.Tags { if setName == t { tag = true break @@ -531,11 +538,11 @@ func (p *Parser) combineObject(result MetricNode) ([]telegraf.Metric, error) { } func (p *Parser) isIncluded(key string, val gjson.Result) bool { - if len(p.currentSettings.IncludedKeys) == 0 { + if len(p.objectConfig.IncludedKeys) == 0 { return true } // automatically adds tags to included_keys so it does NOT have to be repeated in the config - allKeys := append(p.currentSettings.IncludedKeys, p.currentSettings.Tags...) + allKeys := append(p.objectConfig.IncludedKeys, p.objectConfig.Tags...) 
for _, i := range allKeys { if i == key { return true @@ -551,7 +558,7 @@ func (p *Parser) isIncluded(key string, val gjson.Result) bool { } func (p *Parser) isExcluded(key string) bool { - for _, i := range p.currentSettings.ExcludedKeys { + for _, i := range p.objectConfig.ExcludedKeys { if i == key { return true } @@ -576,25 +583,25 @@ func (p *Parser) convertType(input gjson.Result, desiredType string, name string case "uint": r, err := strconv.ParseUint(inputType, 10, 64) if err != nil { - return nil, fmt.Errorf("Unable to convert field '%s' to type uint: %v", name, err) + return nil, fmt.Errorf("unable to convert field '%s' to type uint: %v", name, err) } return r, nil case "int": r, err := strconv.ParseInt(inputType, 10, 64) if err != nil { - return nil, fmt.Errorf("Unable to convert field '%s' to type int: %v", name, err) + return nil, fmt.Errorf("unable to convert field '%s' to type int: %v", name, err) } return r, nil case "float": r, err := strconv.ParseFloat(inputType, 64) if err != nil { - return nil, fmt.Errorf("Unable to convert field '%s' to type float: %v", name, err) + return nil, fmt.Errorf("unable to convert field '%s' to type float: %v", name, err) } return r, nil case "bool": r, err := strconv.ParseBool(inputType) if err != nil { - return nil, fmt.Errorf("Unable to convert field '%s' to type bool: %v", name, err) + return nil, fmt.Errorf("unable to convert field '%s' to type bool: %v", name, err) } return r, nil } @@ -631,7 +638,7 @@ func (p *Parser) convertType(input gjson.Result, desiredType string, name string } else if inputType == 1 { return true, nil } else { - return nil, fmt.Errorf("Unable to convert field '%s' to type bool", name) + return nil, fmt.Errorf("unable to convert field '%s' to type bool", name) } } } From 061b4094cd96f21e9d8807e52b961d676d7d13bb Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Wed, 13 Oct 2021 13:04:02 -0500 Subject: [PATCH 128/176] fix(json_v2): remove dead 
code (#9908) --- plugins/parsers/json_v2/parser.go | 143 ++++++++++++------------------ 1 file changed, 59 insertions(+), 84 deletions(-) diff --git a/plugins/parsers/json_v2/parser.go b/plugins/parsers/json_v2/parser.go index c0570fddd1e7d..46d089127ccdd 100644 --- a/plugins/parsers/json_v2/parser.go +++ b/plugins/parsers/json_v2/parser.go @@ -258,9 +258,6 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { p.Log.Debugf("Found object in query ignoring it please use 'object' to gather metrics from objects") return results, nil } - if result.IncludeCollection == nil && (len(p.objectConfig.FieldPaths) > 0 || len(p.objectConfig.TagPaths) > 0) { - result.IncludeCollection = p.existsInpathResults(result.Index, result.Raw) - } r, err := p.combineObject(result) if err != nil { return nil, err @@ -272,7 +269,7 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { if result.IsArray() { var err error if result.IncludeCollection == nil && (len(p.objectConfig.FieldPaths) > 0 || len(p.objectConfig.TagPaths) > 0) { - result.IncludeCollection = p.existsInpathResults(result.Index, result.Raw) + result.IncludeCollection = p.existsInpathResults(result.Index) } result.ForEach(func(_, val gjson.Result) bool { m := metric.New( @@ -282,23 +279,16 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { p.timestamp, ) if val.IsObject() { - if p.iterateObjects { - n := result - n.ParentIndex += val.Index - n.Metric = m - n.Result = val - if n.IncludeCollection == nil && (len(p.objectConfig.FieldPaths) > 0 || len(p.objectConfig.TagPaths) > 0) { - n.IncludeCollection = p.existsInpathResults(n.Index, n.Raw) - } - r, err := p.combineObject(n) - if err != nil { - return false - } - - results = append(results, r...) 
- } else { - p.Log.Debugf("Found object in query ignoring it please use 'object' to gather metrics from objects") + n := result + n.ParentIndex += val.Index + n.Metric = m + n.Result = val + r, err := p.combineObject(n) + if err != nil { + return false } + + results = append(results, r...) if len(results) != 0 { for _, newResult := range results { mergeMetric(result.Metric, newResult) @@ -307,19 +297,11 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { return true } - for _, f := range result.Metric.FieldList() { - m.AddField(f.Key, f.Value) - } - for _, f := range result.Metric.TagList() { - m.AddTag(f.Key, f.Value) - } + mergeMetric(result.Metric, m) n := result n.ParentIndex += val.Index n.Metric = m n.Result = val - if n.IncludeCollection == nil && (len(p.objectConfig.FieldPaths) > 0 || len(p.objectConfig.TagPaths) > 0) { - n.IncludeCollection = p.existsInpathResults(n.Index, n.Raw) - } r, err := p.expandArray(n) if err != nil { return false @@ -355,7 +337,7 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { pathResult = result.IncludeCollection } else { // Verify that the result should be included based on the results of fieldpaths and tag paths - pathResult = p.existsInpathResults(result.ParentIndex, result.Raw) + pathResult = p.existsInpathResults(result.ParentIndex) } if pathResult == nil { return results, nil @@ -392,17 +374,18 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { return results, nil } -func (p *Parser) existsInpathResults(index int, raw string) *PathResult { +func (p *Parser) existsInpathResults(index int) *PathResult { for _, f := range p.subPathResults { - if f.result.Index == 0 { - for _, i := range f.result.Indexes { - if i == index { - return &f - } - } - } else if f.result.Index == index { + if f.result.Index == index { return &f } + + // Indexes will be populated with all the elements that match on a `#(...)#` query + for _, i := range 
f.result.Indexes { + if i == index { + return &f + } + } } return nil } @@ -435,10 +418,6 @@ func (p *Parser) processObjects(input []byte, objects []JSONObject) ([]telegraf. p.subPathResults = append(p.subPathResults, r) } - if result.Type == gjson.Null { - return nil, fmt.Errorf("GJSON Path returned null") - } - rootObject := MetricNode{ ParentIndex: 0, Metric: metric.New( @@ -578,33 +557,31 @@ func (p *Parser) SetDefaultTags(tags map[string]string) { func (p *Parser) convertType(input gjson.Result, desiredType string, name string) (interface{}, error) { switch inputType := input.Value().(type) { case string: - if desiredType != "string" { - switch desiredType { - case "uint": - r, err := strconv.ParseUint(inputType, 10, 64) - if err != nil { - return nil, fmt.Errorf("unable to convert field '%s' to type uint: %v", name, err) - } - return r, nil - case "int": - r, err := strconv.ParseInt(inputType, 10, 64) - if err != nil { - return nil, fmt.Errorf("unable to convert field '%s' to type int: %v", name, err) - } - return r, nil - case "float": - r, err := strconv.ParseFloat(inputType, 64) - if err != nil { - return nil, fmt.Errorf("unable to convert field '%s' to type float: %v", name, err) - } - return r, nil - case "bool": - r, err := strconv.ParseBool(inputType) - if err != nil { - return nil, fmt.Errorf("unable to convert field '%s' to type bool: %v", name, err) - } - return r, nil + switch desiredType { + case "uint": + r, err := strconv.ParseUint(inputType, 10, 64) + if err != nil { + return nil, fmt.Errorf("Unable to convert field '%s' to type uint: %v", name, err) + } + return r, nil + case "int": + r, err := strconv.ParseInt(inputType, 10, 64) + if err != nil { + return nil, fmt.Errorf("Unable to convert field '%s' to type int: %v", name, err) } + return r, nil + case "float": + r, err := strconv.ParseFloat(inputType, 64) + if err != nil { + return nil, fmt.Errorf("Unable to convert field '%s' to type float: %v", name, err) + } + return r, nil + case 
"bool": + r, err := strconv.ParseBool(inputType) + if err != nil { + return nil, fmt.Errorf("Unable to convert field '%s' to type bool: %v", name, err) + } + return r, nil } case bool: switch desiredType { @@ -624,22 +601,20 @@ func (p *Parser) convertType(input gjson.Result, desiredType string, name string return uint64(0), nil } case float64: - if desiredType != "float" { - switch desiredType { - case "string": - return fmt.Sprint(inputType), nil - case "int": - return input.Int(), nil - case "uint": - return input.Uint(), nil - case "bool": - if inputType == 0 { - return false, nil - } else if inputType == 1 { - return true, nil - } else { - return nil, fmt.Errorf("unable to convert field '%s' to type bool", name) - } + switch desiredType { + case "string": + return fmt.Sprint(inputType), nil + case "int": + return input.Int(), nil + case "uint": + return input.Uint(), nil + case "bool": + if inputType == 0 { + return false, nil + } else if inputType == 1 { + return true, nil + } else { + return nil, fmt.Errorf("Unable to convert field '%s' to type bool", name) } } default: From b7988d83d729b096dc72cad56332ea9bfc2463a8 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Wed, 13 Oct 2021 13:18:33 -0500 Subject: [PATCH 129/176] chore: stop testing with two versions of Go (#9909) --- .circleci/config.yml | 40 +----------------------------------- Makefile | 5 ----- docs/developers/PACKAGING.md | 11 +++++++++- scripts/ci-1.16.docker | 23 --------------------- 4 files changed, 11 insertions(+), 68 deletions(-) delete mode 100644 scripts/ci-1.16.docker diff --git a/.circleci/config.yml b/.circleci/config.yml index 027a529cb0385..3cc2ac7f9b0ec 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -4,12 +4,6 @@ orbs: aws-cli: circleci/aws-cli@1.4.0 executors: - go-1_16: - working_directory: '/go/src/github.com/influxdata/telegraf' - docker: - - image: 'quay.io/influxdb/telegraf-ci:1.16.7' - environment: - GOFLAGS: 
-p=8 go-1_17: working_directory: '/go/src/github.com/influxdata/telegraf' docker: @@ -172,16 +166,6 @@ jobs: root: '/go' paths: - '*' - test-go-1_16: - executor: go-1_16 - steps: - - test-go - parallelism: 4 - test-go-1_16-386: - executor: go-1_16 - steps: - - test-go - parallelism: 4 test-go-1_17: executor: go-1_17 steps: @@ -224,7 +208,7 @@ jobs: - package-build: type: i386 ppc64le-package: - executor: go-1_16 + executor: go-1_17 steps: - package-build: type: ppc64le @@ -375,8 +359,6 @@ commonjobs: - &test-awaiter 'test-awaiter': requires: - - 'test-go-1_16' - - 'test-go-1_16-386' - 'test-go-1_17' - 'test-go-1_17-386' - &windows-package @@ -436,18 +418,6 @@ workflows: filters: tags: only: /.*/ - - 'test-go-1_16': - requires: - - 'deps' - filters: - tags: - only: /.*/ - - 'test-go-1_16-386': - requires: - - 'deps' - filters: - tags: - only: /.*/ - 'test-go-1_17': requires: - 'deps' @@ -525,8 +495,6 @@ workflows: requires: - 'test-go-windows' - 'test-go-mac' - - 'test-go-1_16' - - 'test-go-1_16-386' - 'test-go-1_17' - 'test-go-1_17-386' filters: @@ -550,12 +518,6 @@ workflows: nightly: jobs: - 'deps' - - 'test-go-1_16': - requires: - - 'deps' - - 'test-go-1_16-386': - requires: - - 'deps' - 'test-go-1_17': requires: - 'deps' diff --git a/Makefile b/Makefile index 143a55d58a757..0c59319de710d 100644 --- a/Makefile +++ b/Makefile @@ -199,11 +199,6 @@ plugin-%: @echo "Starting dev environment for $${$(@)} input plugin..." 
@docker-compose -f plugins/inputs/$${$(@)}/dev/docker-compose.yml up -.PHONY: ci-1.16 -ci-1.16: - docker build -t quay.io/influxdb/telegraf-ci:1.16.7 - < scripts/ci-1.16.docker - docker push quay.io/influxdb/telegraf-ci:1.16.7 - .PHONY: ci-1.17 ci-1.17: docker build -t quay.io/influxdb/telegraf-ci:1.17.0 - < scripts/ci-1.17.docker diff --git a/docs/developers/PACKAGING.md b/docs/developers/PACKAGING.md index cbdb61b05af01..000479c94ce42 100644 --- a/docs/developers/PACKAGING.md +++ b/docs/developers/PACKAGING.md @@ -2,7 +2,16 @@ Building the packages for Telegraf is automated using [Make](https://en.wikipedia.org/wiki/Make_(software)). Just running `make` will build a Telegraf binary for the operating system and architecture you are using (if it is supported). If you need to build a different package then you can run `make package` which will build all the supported packages. You will most likely only want a subset, you can define a subset of packages to be built by overriding the `include_packages` variable like so `make package include_packages="amd64.deb"`. You can also build all packages for a specific architecture like so `make package include_packages="$(make amd64)"`. -The packaging steps require certain tools to be setup before hand to work. These dependencies are listed in the ci-1.16.docker file which you can find in the scripts directory. Therefore it is recommended to use Docker to build the artifacts, see more details below. +The packaging steps require certain tools to be setup before hand to work. These dependencies are listed in the ci-1.17.docker file which you can find in the scripts directory. Therefore it is recommended to use Docker to build the artifacts, see more details below. + +## Go Version + +Telegraf will be built using the latest version of Go whenever possible. Incrementing the version is maintained by the core Telegraf team because it requires access to an internal docker repository that hosts the docker CI images. 
When a new version is released, the following process is followed: + +1. Within the `Makefile` and `.circleci\config.yml` update the Go versions to the new version number +2. Run `make ci-` where `` refers to the new Go version number (this requires internal permissions) +3. The files `scripts\installgo_mac.sh` and `scripts\installgo_windows.sh` need to be updated as well with the new Go version and SHA +4. Create a pull request with these new changes, and verify the CI passes and uses the new docker image ## Package using Docker diff --git a/scripts/ci-1.16.docker b/scripts/ci-1.16.docker deleted file mode 100644 index ab1683329e633..0000000000000 --- a/scripts/ci-1.16.docker +++ /dev/null @@ -1,23 +0,0 @@ -FROM golang:1.16.7 - -RUN chmod -R 755 "$GOPATH" - -RUN DEBIAN_FRONTEND=noninteractive \ - apt update && apt install -y --no-install-recommends \ - autoconf \ - git \ - libtool \ - locales \ - make \ - awscli \ - rpm \ - ruby \ - ruby-dev \ - zip && \ - rm -rf /var/lib/apt/lists/* - -RUN ln -sf /usr/share/zoneinfo/Etc/UTC /etc/localtime -RUN locale-gen C.UTF-8 || true -ENV LANG=C.UTF-8 - -RUN gem install fpm From 97e19ebd55264255bb1939db3ec6ce45bb0d286a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 13 Oct 2021 14:04:18 -0500 Subject: [PATCH 130/176] fix: bump github.com/prometheus/common from 0.26.0 to 0.31.1 (#9869) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Sebastian Spaink --- go.mod | 2 +- go.sum | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index af89256872a8e..dbdde82b60e64 100644 --- a/go.mod +++ b/go.mod @@ -217,7 +217,7 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_golang v1.11.0 github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.30.0 + github.com/prometheus/common v0.31.1 github.com/prometheus/procfs v0.6.0 
github.com/prometheus/prometheus v1.8.2-0.20210430082741-2a4b8e12bbf2 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect diff --git a/go.sum b/go.sum index f4bd2822edf1c..974e10056a877 100644 --- a/go.sum +++ b/go.sum @@ -1709,8 +1709,9 @@ github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16 github.com/prometheus/common v0.23.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q= github.com/prometheus/common v0.25.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.30.0 h1:JEkYlQnpzrzQFxi6gnukFPdQ+ac82oRhzMcIduJu/Ug= github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.31.1 h1:d18hG4PkHnNAKNMOmFuXFaiY8Us0nird/2m60uS1AMs= +github.com/prometheus/common v0.31.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/exporter-toolkit v0.5.1/go.mod h1:OCkM4805mmisBhLmVFw858QYi3v0wKdY6/UxrT0pZVg= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -2265,6 +2266,7 @@ golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net 
v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= From 432b30d2e9863f0ed61c36b0b800f95ee7a139bf Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Wed, 13 Oct 2021 14:43:05 -0500 Subject: [PATCH 131/176] fix(input/prometheus): move err check to correct place (#9920) --- plugins/inputs/prometheus/kubernetes.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/prometheus/kubernetes.go b/plugins/inputs/prometheus/kubernetes.go index a57e771bfc483..0f260acb48b60 100644 --- a/plugins/inputs/prometheus/kubernetes.go +++ b/plugins/inputs/prometheus/kubernetes.go @@ -154,12 +154,11 @@ func (p *Prometheus) cAdvisor(ctx context.Context, bearerToken string) error { // The request will be the same each time podsURL := fmt.Sprintf("https://%s:10250/pods", p.NodeIP) req, err := http.NewRequest("GET", podsURL, nil) - req.Header.Set("Authorization", "Bearer "+bearerToken) - req.Header.Add("Accept", "application/json") - if err != nil { return fmt.Errorf("error when creating request to %s to get pod list: %w", podsURL, err) } + req.Header.Set("Authorization", "Bearer "+bearerToken) + req.Header.Add("Accept", "application/json") // Update right away so code is not waiting the length of the specified scrape interval initially err = updateCadvisorPodList(p, req) From d9b202a41be27c139ad92446d458db22baebf4b9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 13 Oct 2021 15:51:52 -0500 Subject: [PATCH 132/176] fix: bump github.com/docker/docker from 20.10.7+incompatible to 20.10.9+incompatible (#9905) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Sebastian Spaink --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff 
--git a/go.mod b/go.mod index dbdde82b60e64..5cfc87e46ef7a 100644 --- a/go.mod +++ b/go.mod @@ -82,7 +82,7 @@ require ( github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect github.com/dimchansky/utfbom v1.1.1 github.com/docker/distribution v2.7.1+incompatible // indirect - github.com/docker/docker v20.10.7+incompatible + github.com/docker/docker v20.10.9+incompatible github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.4.0 // indirect github.com/doclambda/protobufquery v0.0.0-20210317203640-88ffabe06a60 diff --git a/go.sum b/go.sum index 974e10056a877..8331bffea2935 100644 --- a/go.sum +++ b/go.sum @@ -584,8 +584,9 @@ github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v20.10.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.7+incompatible h1:Z6O9Nhsjv+ayUEeI1IojKbYcsGdgYSNqxe1s2MYzUhQ= github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.9+incompatible h1:JlsVnETOjM2RLQa0Cc1XCIspUdXW3Zenq9P54uXBm6k= +github.com/docker/docker v20.10.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= From 4194e54ceb36ec3d8eeede523f1de68b73521357 Mon Sep 17 00:00:00 2001 From: Felix Edelmann Date: Thu, 14 Oct 2021 18:06:43 +0200 Subject: [PATCH 133/176] fix: inconsistent metric types in mysql (#9403) --- plugins/inputs/mysql/v2/convert.go | 34 ++++++++++++++++++++++--- 
plugins/inputs/mysql/v2/convert_test.go | 4 +-- 2 files changed, 33 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/mysql/v2/convert.go b/plugins/inputs/mysql/v2/convert.go index 78f978fa059ee..d5b73ec7f4c1e 100644 --- a/plugins/inputs/mysql/v2/convert.go +++ b/plugins/inputs/mysql/v2/convert.go @@ -21,6 +21,10 @@ func ParseInt(value sql.RawBytes) (interface{}, error) { return v, err } +func ParseUint(value sql.RawBytes) (interface{}, error) { + return strconv.ParseUint(string(value), 10, 64) +} + func ParseBoolAsInteger(value sql.RawBytes) (interface{}, error) { if bytes.EqualFold(value, []byte("YES")) || bytes.EqualFold(value, []byte("ON")) { return int64(1), nil @@ -29,6 +33,10 @@ func ParseBoolAsInteger(value sql.RawBytes) (interface{}, error) { return int64(0), nil } +func ParseString(value sql.RawBytes) (interface{}, error) { + return string(value), nil +} + func ParseGTIDMode(value sql.RawBytes) (interface{}, error) { // https://dev.mysql.com/doc/refman/8.0/en/replication-mode-change-online-concepts.html v := string(value) @@ -58,6 +66,9 @@ func ParseValue(value sql.RawBytes) (interface{}, error) { if val, err := strconv.ParseInt(string(value), 10, 64); err == nil { return val, nil } + if val, err := strconv.ParseUint(string(value), 10, 64); err == nil { + return val, nil + } if val, err := strconv.ParseFloat(string(value), 64); err == nil { return val, nil } @@ -70,12 +81,29 @@ func ParseValue(value sql.RawBytes) (interface{}, error) { } var GlobalStatusConversions = map[string]ConversionFunc{ - "ssl_ctx_verify_depth": ParseInt, - "ssl_verify_depth": ParseInt, + "innodb_available_undo_logs": ParseUint, + "innodb_buffer_pool_pages_misc": ParseUint, + "innodb_data_pending_fsyncs": ParseUint, + "ssl_ctx_verify_depth": ParseUint, + "ssl_verify_depth": ParseUint, } +// see https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html +// see https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html var GlobalVariableConversions = 
map[string]ConversionFunc{ - "gtid_mode": ParseGTIDMode, + "delay_key_write": ParseString, // ON, OFF, ALL + "enforce_gtid_consistency": ParseString, // ON, OFF, WARN + "event_scheduler": ParseString, // YES, NO, DISABLED + "gtid_mode": ParseGTIDMode, + "have_openssl": ParseBoolAsInteger, // alias for have_ssl + "have_ssl": ParseBoolAsInteger, // YES, DISABLED + "have_symlink": ParseBoolAsInteger, // YES, NO, DISABLED + "session_track_gtids": ParseString, + "session_track_transaction_info": ParseString, + "slave_skip_errors": ParseString, + "ssl_fips_mode": ParseString, + "transaction_write_set_extraction": ParseString, + "use_secondary_engine": ParseString, } func ConvertGlobalStatus(key string, value sql.RawBytes) (interface{}, error) { diff --git a/plugins/inputs/mysql/v2/convert_test.go b/plugins/inputs/mysql/v2/convert_test.go index 47189c18d1576..43133eeb39c1b 100644 --- a/plugins/inputs/mysql/v2/convert_test.go +++ b/plugins/inputs/mysql/v2/convert_test.go @@ -19,14 +19,14 @@ func TestConvertGlobalStatus(t *testing.T) { name: "default", key: "ssl_ctx_verify_depth", value: []byte("0"), - expected: int64(0), + expected: uint64(0), expectedErr: nil, }, { name: "overflow int64", key: "ssl_ctx_verify_depth", value: []byte("18446744073709551615"), - expected: int64(9223372036854775807), + expected: uint64(18446744073709551615), expectedErr: nil, }, { From 0b1f0cf55f4f320de7a38c5b93206d82eb31c997 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 14 Oct 2021 14:06:00 -0500 Subject: [PATCH 134/176] fix: bump github.com/apache/thrift from 0.14.2 to 0.15.0 (#9921) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: MyaLongmire --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 5cfc87e46ef7a..dc11a4cd484d6 100644 --- a/go.mod +++ b/go.mod @@ -43,7 +43,7 @@ require ( github.com/antchfx/xmlquery 
v1.3.6 github.com/antchfx/xpath v1.1.11 github.com/apache/arrow/go/arrow v0.0.0-20211006091945-a69884db78f4 // indirect - github.com/apache/thrift v0.14.2 + github.com/apache/thrift v0.15.0 github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 // indirect github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 github.com/armon/go-metrics v0.3.3 // indirect diff --git a/go.sum b/go.sum index 8331bffea2935..8c55e7ee1bed3 100644 --- a/go.sum +++ b/go.sum @@ -255,8 +255,9 @@ github.com/apache/arrow/go/arrow v0.0.0-20211006091945-a69884db78f4/go.mod h1:Q7 github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.14.1/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.14.2 h1:hY4rAyg7Eqbb27GB6gkhUKrRAuc8xRjlNtJq+LseKeY= github.com/apache/thrift v0.14.2/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.15.0 h1:aGvdaR0v1t9XLgjtBYwxcBvBOTMqClzwE26CHOgjW1Y= +github.com/apache/thrift v0.15.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 h1:Bmjk+DjIi3tTAU0wxGaFbfjGUqlxxSXARq9A96Kgoos= github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3/go.mod h1:KASm+qXFKs/xjSoWn30NrWBBvdTTQq+UjkhjEJHfSFA= github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 h1:FD4/ikKOFxwP8muWDypbmBWc634+YcAs3eBrYAmRdZY= From d02f5057c44bfb67c2ae8392bdac5c1fdbfade87 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 14 Oct 2021 16:24:25 -0500 Subject: [PATCH 135/176] fix: bump github.com/golang-jwt/jwt/v4 from 4.0.0 to 4.1.0 (#9904) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Sebastian Spaink --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 
insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index dc11a4cd484d6..4d967017fc3bd 100644 --- a/go.mod +++ b/go.mod @@ -107,7 +107,7 @@ require ( github.com/gobwas/glob v0.2.3 github.com/gofrs/uuid v3.3.0+incompatible github.com/gogo/protobuf v1.3.2 - github.com/golang-jwt/jwt/v4 v4.0.0 + github.com/golang-jwt/jwt/v4 v4.1.0 github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe // indirect github.com/golang/geo v0.0.0-20190916061304-5b978397cfec github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect diff --git a/go.sum b/go.sum index 8c55e7ee1bed3..817380dddf796 100644 --- a/go.sum +++ b/go.sum @@ -885,8 +885,9 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= -github.com/golang-jwt/jwt/v4 v4.0.0 h1:RAqyYixv1p7uEnocuy8P1nru5wprCh/MH2BIlW5z5/o= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.1.0 h1:XUgk2Ex5veyVFVeLm0xhusUTQybEbexJXrvPNOKkSY0= +github.com/golang-jwt/jwt/v4 v4.1.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= From 4c417976a34b25a17ce2b69e140fa5c5d8046621 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 14 Oct 2021 16:24:54 -0500 Subject: [PATCH 136/176] fix: bump github.com/Azure/azure-kusto-go from 0.3.2 to 0.4.0 
(#9768) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Sebastian Spaink --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 4d967017fc3bd..4933a0a4af7a0 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( collectd.org v0.5.0 github.com/Azure/azure-amqp-common-go/v3 v3.0.1 // indirect github.com/Azure/azure-event-hubs-go/v3 v3.3.13 - github.com/Azure/azure-kusto-go v0.3.2 + github.com/Azure/azure-kusto-go v0.4.0 github.com/Azure/azure-pipeline-go v0.2.3 // indirect github.com/Azure/azure-sdk-for-go v52.5.0+incompatible // indirect github.com/Azure/azure-storage-blob-go v0.14.0 // indirect diff --git a/go.sum b/go.sum index 817380dddf796..6b55cd79a3afe 100644 --- a/go.sum +++ b/go.sum @@ -70,8 +70,8 @@ github.com/Azure/azure-amqp-common-go/v3 v3.0.1 h1:mXh+eyOxGLBfqDtfmbtby0l7XfG/6 github.com/Azure/azure-amqp-common-go/v3 v3.0.1/go.mod h1:PBIGdzcO1teYoufTKMcGibdKaYZv4avS+O6LNIp8bq0= github.com/Azure/azure-event-hubs-go/v3 v3.3.13 h1:aiI2RLjp0MzLCuFUXzR8b3h3bdPIc2c3vBYXRK8jX3E= github.com/Azure/azure-event-hubs-go/v3 v3.3.13/go.mod h1:dJ/WqDn0KEJkNznL9UT/UbXzfmkffCjSNl9x2Y8JI28= -github.com/Azure/azure-kusto-go v0.3.2 h1:XpS9co6GvEDl2oICF9HsjEsQVwEpRK6wbNWb9Z+uqsY= -github.com/Azure/azure-kusto-go v0.3.2/go.mod h1:wd50n4qlsSxh+G4f80t+Fnl2ShK9AcXD+lMOstiKuYo= +github.com/Azure/azure-kusto-go v0.4.0 h1:CivPswdkVzSXzEjzJTyOJ6e5RhI4IKvaszilyNGvs+A= +github.com/Azure/azure-kusto-go v0.4.0/go.mod h1:wd50n4qlsSxh+G4f80t+Fnl2ShK9AcXD+lMOstiKuYo= github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= github.com/Azure/azure-pipeline-go v0.1.9/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= From 3986449ae75290a53191f7c4304cbd24af9d713a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pavel=20Z=C3=A1vora?= Date: Fri, 
15 Oct 2021 15:12:29 +0200 Subject: [PATCH 137/176] chore: reference oracle external plugin (#9934) --- EXTERNAL_PLUGINS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/EXTERNAL_PLUGINS.md b/EXTERNAL_PLUGINS.md index 7e074d10f2e8d..7ba0166a9bcc6 100644 --- a/EXTERNAL_PLUGINS.md +++ b/EXTERNAL_PLUGINS.md @@ -23,6 +23,7 @@ Pull requests welcome. - [s7comm](https://github.com/nicolasme/s7comm) - Gather information from Siemens PLC - [net_irtt](https://github.com/iAnatoly/telegraf-input-net_irtt) - Gather information from IRTT network test - [dht_sensor](https://github.com/iAnatoly/telegraf-input-dht_sensor) - Gather temperature and humidity from DHTXX sensors +- [oracle](https://github.com/bonitoo-io/telegraf-input-oracle) - Gather the statistic data from Oracle RDBMS ## Outputs - [kinesis](https://github.com/morfien101/telegraf-output-kinesis) - Aggregation and compression of metrics to send Amazon Kinesis. From a65634c3d69a0faca333f46b6d145ade931c8682 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Fri, 15 Oct 2021 12:05:19 -0700 Subject: [PATCH 138/176] docs: update contributing.md (#9914) --- CONTRIBUTING.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 525a752714e84..d5732dcbfa1d1 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -16,7 +16,10 @@ **Note:** If you have a pull request with only one commit, then that commit needs to follow the conventional commit format or the `Semantic Pull Request` check will fail. This is because github will use the pull request title if there are multiple commits, but if there is only one commit it will use it instead. -#### Contributing an External Plugin *(new)* +#### When will your contribution get released? +We have two kinds of releases: patch releases, which happen every few weeks, and feature releases, which happen once a quarter. 
If your fix is a bug fix, it will be released in the next patch release after it is merged to master. If your release is a new plugin or other feature, it will be released in the next quarterly release after it is merged to master. Quarterly releases are on the third Wednesday of March, June, September, and December. + +#### Contributing an External Plugin Input, output, and processor plugins written for internal Telegraf can be run as externally-compiled plugins through the [Execd Input](/plugins/inputs/execd), [Execd Output](/plugins/outputs/execd), and [Execd Processor](/plugins/processors/execd) Plugins without having to change the plugin code. From aaaaabd0fdfb9b44835c9d741aeccaf639e21c72 Mon Sep 17 00:00:00 2001 From: Thomas Casteleyn Date: Fri, 15 Oct 2021 21:05:37 +0200 Subject: [PATCH 139/176] docs: Add symlink to command documentation (#9926) --- cmd/telegraf/README.md | 1 + 1 file changed, 1 insertion(+) create mode 120000 cmd/telegraf/README.md diff --git a/cmd/telegraf/README.md b/cmd/telegraf/README.md new file mode 120000 index 0000000000000..162972fc44d34 --- /dev/null +++ b/cmd/telegraf/README.md @@ -0,0 +1 @@ +../../docs/COMMANDS_AND_FLAGS.md \ No newline at end of file From 34c2b6232d6b2316bf57258c26f8b6e4bc164046 Mon Sep 17 00:00:00 2001 From: alon Date: Fri, 15 Oct 2021 14:15:40 -0500 Subject: [PATCH 140/176] fix: solve compatibility issue for mongodb inputs when using 5.x relicaset (#9892) --- plugins/inputs/mongodb/mongostat.go | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index 3871f6d252909..ea69c8d424f7c 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -248,14 +248,15 @@ type TransactionStats struct { // ReplStatus stores data related to replica sets. 
type ReplStatus struct { - SetName string `bson:"setName"` - IsMaster interface{} `bson:"ismaster"` - Secondary interface{} `bson:"secondary"` - IsReplicaSet interface{} `bson:"isreplicaset"` - ArbiterOnly interface{} `bson:"arbiterOnly"` - Hosts []string `bson:"hosts"` - Passives []string `bson:"passives"` - Me string `bson:"me"` + SetName string `bson:"setName"` + IsWritablePrimary interface{} `bson:"isWritablePrimary"` // mongodb 5.x + IsMaster interface{} `bson:"ismaster"` + Secondary interface{} `bson:"secondary"` + IsReplicaSet interface{} `bson:"isreplicaset"` + ArbiterOnly interface{} `bson:"arbiterOnly"` + Hosts []string `bson:"hosts"` + Passives []string `bson:"passives"` + Me string `bson:"me"` } // DBRecordStats stores data related to memory operations across databases. @@ -1165,11 +1166,13 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec if newStat.Repl != nil { returnVal.ReplSetName = newStat.Repl.SetName // BEGIN code modification - if newStat.Repl.IsMaster.(bool) { + if val, ok := newStat.Repl.IsMaster.(bool); ok && val { returnVal.NodeType = "PRI" - } else if newStat.Repl.Secondary != nil && newStat.Repl.Secondary.(bool) { + } else if val, ok := newStat.Repl.IsWritablePrimary.(bool); ok && val { + returnVal.NodeType = "PRI" + } else if val, ok := newStat.Repl.Secondary.(bool); ok && val { returnVal.NodeType = "SEC" - } else if newStat.Repl.ArbiterOnly != nil && newStat.Repl.ArbiterOnly.(bool) { + } else if val, ok := newStat.Repl.ArbiterOnly.(bool); ok && val { returnVal.NodeType = "ARB" } else { returnVal.NodeType = "UNK" From c4c32025c8ff59d024906fad6c8b8eba06b2f836 Mon Sep 17 00:00:00 2001 From: Sam Arnold Date: Mon, 18 Oct 2021 10:04:53 -0400 Subject: [PATCH 141/176] feat: more fields for papertrail event webhook (#9940) --- plugins/inputs/webhooks/papertrail/README.md | 22 ++++++++++--- .../webhooks/papertrail/papertrail_test.go | 32 ++++++++++++++++--- .../papertrail/papertrail_webhooks.go | 13 +++++++- 3 
files changed, 58 insertions(+), 9 deletions(-) diff --git a/plugins/inputs/webhooks/papertrail/README.md b/plugins/inputs/webhooks/papertrail/README.md index a3463dcaa6f8b..3f9c33ec5320c 100644 --- a/plugins/inputs/webhooks/papertrail/README.md +++ b/plugins/inputs/webhooks/papertrail/README.md @@ -14,6 +14,23 @@ Events from Papertrail come in two forms: * Each point has a field counter (`count`), which is set to `1` (signifying the event occurred) * Each event "hostname" object is converted to a `host` tag * The "saved_search" name in the payload is added as an `event` tag + * The "saved_search" id in the payload is added as a `search_id` field + * The papertrail url to view the event is built and added as a `url` field + * The rest of the data in the event is converted directly to fields on the point: + * `id` + * `source_ip` + * `source_name` + * `source_id` + * `program` + * `severity` + * `facility` + * `message` + +When a callback is received, an event-based point will look similar to: + +``` +papertrail,host=myserver.example.com,event=saved_search_name count=1i,source_name="abc",program="CROND",severity="Info",source_id=2i,message="message body",source_ip="208.75.57.121",id=7711561783320576i,facility="Cron",url="https://papertrailapp.com/searches/42?centered_on_id=7711561783320576",search_id=42i 1453248892000000000 +``` * The [count-based callback](http://help.papertrailapp.com/kb/how-it-works/web-hooks/#count-only-webhooks) @@ -22,10 +39,7 @@ Events from Papertrail come in two forms: * Each count "source_name" object is converted to a `host` tag * The "saved_search" name in the payload is added as an `event` tag -The current functionality is very basic, however this allows you to -track the number of events by host and saved search. 
- -When an event is received, any point will look similar to: +When a callback is received, a count-based point will look similar to: ``` papertrail,host=myserver.example.com,event=saved_search_name count=3i 1453248892000000000 diff --git a/plugins/inputs/webhooks/papertrail/papertrail_test.go b/plugins/inputs/webhooks/papertrail/papertrail_test.go index 14b8aec895c98..6cba6730c9486 100644 --- a/plugins/inputs/webhooks/papertrail/papertrail_test.go +++ b/plugins/inputs/webhooks/papertrail/papertrail_test.go @@ -67,8 +67,32 @@ func TestEventPayload(t *testing.T) { resp := post(pt, contentType, form.Encode()) require.Equal(t, http.StatusOK, resp.Code) - fields := map[string]interface{}{ - "count": uint64(1), + fields1 := map[string]interface{}{ + "count": uint64(1), + "id": int64(7711561783320576), + "source_ip": "208.75.57.121", + "source_name": "abc", + "source_id": int64(2), + "program": "CROND", + "severity": "Info", + "facility": "Cron", + "message": "message body", + "url": "https://papertrailapp.com/searches/42?centered_on_id=7711561783320576", + "search_id": int64(42), + } + + fields2 := map[string]interface{}{ + "count": uint64(1), + "id": int64(7711562567655424), + "source_ip": "208.75.57.120", + "source_name": "server1", + "source_id": int64(19), + "program": "CROND", + "severity": "Info", + "facility": "Cron", + "message": "A short event", + "url": "https://papertrailapp.com/searches/42?centered_on_id=7711562567655424", + "search_id": int64(42), } tags1 := map[string]string{ @@ -80,8 +104,8 @@ func TestEventPayload(t *testing.T) { "host": "def", } - acc.AssertContainsTaggedFields(t, "papertrail", fields, tags1) - acc.AssertContainsTaggedFields(t, "papertrail", fields, tags2) + acc.AssertContainsTaggedFields(t, "papertrail", fields1, tags1) + acc.AssertContainsTaggedFields(t, "papertrail", fields2, tags2) } func TestCountPayload(t *testing.T) { diff --git a/plugins/inputs/webhooks/papertrail/papertrail_webhooks.go 
b/plugins/inputs/webhooks/papertrail/papertrail_webhooks.go index 7f11e31e79a11..5aa8ecaf83fc2 100644 --- a/plugins/inputs/webhooks/papertrail/papertrail_webhooks.go +++ b/plugins/inputs/webhooks/papertrail/papertrail_webhooks.go @@ -2,6 +2,7 @@ package papertrail import ( "encoding/json" + "fmt" "log" "net/http" "time" @@ -49,7 +50,17 @@ func (pt *PapertrailWebhook) eventHandler(w http.ResponseWriter, r *http.Request "event": payload.SavedSearch.Name, } fields := map[string]interface{}{ - "count": uint64(1), + "count": uint64(1), + "id": e.ID, + "source_ip": e.SourceIP, + "source_name": e.SourceName, + "source_id": int64(e.SourceID), + "program": e.Program, + "severity": e.Severity, + "facility": e.Facility, + "message": e.Message, + "url": fmt.Sprintf("%s?centered_on_id=%d", payload.SavedSearch.SearchURL, e.ID), + "search_id": payload.SavedSearch.ID, } pt.acc.AddFields("papertrail", fields, tags, e.ReceivedAt) } From e324ef19851a7dc1fc71b0de5d3d2846547f2c43 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Cont=C3=A9?= <199027+tomconte@users.noreply.github.com> Date: Mon, 18 Oct 2021 16:06:35 +0200 Subject: [PATCH 142/176] feat: Azure Event Hubs output plugin (#9346) --- go.mod | 4 +- go.sum | 14 +- plugins/outputs/all/all.go | 1 + .../azure_monitor/azure_monitor_test.go | 12 ++ plugins/outputs/event_hubs/README.md | 25 +++ plugins/outputs/event_hubs/event_hubs.go | 148 ++++++++++++++++ plugins/outputs/event_hubs/event_hubs_test.go | 162 ++++++++++++++++++ 7 files changed, 362 insertions(+), 4 deletions(-) create mode 100644 plugins/outputs/event_hubs/README.md create mode 100644 plugins/outputs/event_hubs/event_hubs.go create mode 100644 plugins/outputs/event_hubs/event_hubs_test.go diff --git a/go.mod b/go.mod index 4933a0a4af7a0..95992243e964e 100644 --- a/go.mod +++ b/go.mod @@ -9,11 +9,11 @@ require ( cloud.google.com/go/pubsub v1.17.0 code.cloudfoundry.org/clock v1.0.0 // indirect collectd.org v0.5.0 - github.com/Azure/azure-amqp-common-go/v3 v3.0.1 // 
indirect + github.com/Azure/azure-amqp-common-go/v3 v3.1.0 // indirect github.com/Azure/azure-event-hubs-go/v3 v3.3.13 github.com/Azure/azure-kusto-go v0.4.0 github.com/Azure/azure-pipeline-go v0.2.3 // indirect - github.com/Azure/azure-sdk-for-go v52.5.0+incompatible // indirect + github.com/Azure/azure-sdk-for-go v55.0.0+incompatible // indirect github.com/Azure/azure-storage-blob-go v0.14.0 // indirect github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd github.com/Azure/go-amqp v0.13.12 // indirect diff --git a/go.sum b/go.sum index 6b55cd79a3afe..db4f0ad82eaed 100644 --- a/go.sum +++ b/go.sum @@ -66,8 +66,10 @@ contrib.go.opencensus.io/exporter/prometheus v0.3.0/go.mod h1:rpCPVQKhiyH8oomWgm dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= -github.com/Azure/azure-amqp-common-go/v3 v3.0.1 h1:mXh+eyOxGLBfqDtfmbtby0l7XfG/6b2NkuZ3B7i6zHA= github.com/Azure/azure-amqp-common-go/v3 v3.0.1/go.mod h1:PBIGdzcO1teYoufTKMcGibdKaYZv4avS+O6LNIp8bq0= +github.com/Azure/azure-amqp-common-go/v3 v3.0.1/go.mod h1:PBIGdzcO1teYoufTKMcGibdKaYZv4avS+O6LNIp8bq0= +github.com/Azure/azure-amqp-common-go/v3 v3.1.0 h1:1N4YSkWYWffOpQHromYdOucBSQXhNRKzqtgICy6To8Q= +github.com/Azure/azure-amqp-common-go/v3 v3.1.0/go.mod h1:PBIGdzcO1teYoufTKMcGibdKaYZv4avS+O6LNIp8bq0= github.com/Azure/azure-event-hubs-go/v3 v3.3.13 h1:aiI2RLjp0MzLCuFUXzR8b3h3bdPIc2c3vBYXRK8jX3E= github.com/Azure/azure-event-hubs-go/v3 v3.3.13/go.mod h1:dJ/WqDn0KEJkNznL9UT/UbXzfmkffCjSNl9x2Y8JI28= github.com/Azure/azure-kusto-go v0.4.0 h1:CivPswdkVzSXzEjzJTyOJ6e5RhI4IKvaszilyNGvs+A= @@ -80,8 +82,10 @@ github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl github.com/Azure/azure-sdk-for-go 
v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v44.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v51.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v52.5.0+incompatible h1:/NLBWHCnIHtZyLPc1P7WIqi4Te4CC23kIQyK3Ep/7lA= +github.com/Azure/azure-sdk-for-go v51.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v52.5.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v55.0.0+incompatible h1:L4/vUGbg1Xkw5L20LZD+hJI5I+ibWSytqQ68lTCfLwY= +github.com/Azure/azure-sdk-for-go v55.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y= github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= github.com/Azure/azure-storage-blob-go v0.14.0 h1:1BCg74AmVdYwO3dlKwtFU1V0wU2PZdREkXvAmZJRUlM= @@ -113,6 +117,7 @@ github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQW github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/adal v0.9.11/go.mod h1:nBKAnTomx8gDtl+3ZCJv2v0KACFHWTB2drffI1B68Pk= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/adal v0.9.16 h1:P8An8Z9rH1ldbOLdFpxYorgOt2sywL9V24dAwWHPuGc= github.com/Azure/go-autorest/autorest/adal v0.9.16/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= @@ -132,6 +137,8 @@ 
github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935 github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= +github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= +github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= @@ -520,6 +527,8 @@ github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7 github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= @@ -2429,6 +2438,7 @@ golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod 
h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go index 7248b4ddcddb0..33b2f92dd01df 100644 --- a/plugins/outputs/all/all.go +++ b/plugins/outputs/all/all.go @@ -15,6 +15,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/outputs/discard" _ "github.com/influxdata/telegraf/plugins/outputs/dynatrace" _ "github.com/influxdata/telegraf/plugins/outputs/elasticsearch" + _ "github.com/influxdata/telegraf/plugins/outputs/event_hubs" _ "github.com/influxdata/telegraf/plugins/outputs/exec" _ "github.com/influxdata/telegraf/plugins/outputs/execd" _ "github.com/influxdata/telegraf/plugins/outputs/file" diff --git a/plugins/outputs/azure_monitor/azure_monitor_test.go b/plugins/outputs/azure_monitor/azure_monitor_test.go index 803b0441af207..db8243e82d5ad 100644 --- a/plugins/outputs/azure_monitor/azure_monitor_test.go +++ b/plugins/outputs/azure_monitor/azure_monitor_test.go @@ -373,3 +373,15 @@ func TestWrite(t *testing.T) { }) } } + +func TestMain(m *testing.M) { + // Set up a fake environment for adal.getMSIType() + // Root cause: https://github.com/Azure/go-autorest/commit/def88ef859fb980eff240c755a70597bc9b490d0 + err := os.Setenv("MSI_ENDPOINT", "fake.endpoint") + + if err != nil { + 
panic(err) + } + + os.Exit(m.Run()) +} diff --git a/plugins/outputs/event_hubs/README.md b/plugins/outputs/event_hubs/README.md new file mode 100644 index 0000000000000..c71c06f99e160 --- /dev/null +++ b/plugins/outputs/event_hubs/README.md @@ -0,0 +1,25 @@ +# Azure Event Hubs output plugin + +This plugin for [Azure Event Hubs](https://azure.microsoft.com/en-gb/services/event-hubs/) will send metrics to a single Event Hub within an Event Hubs namespace. Metrics are sent as message batches, each message payload containing one metric object. The messages do not specify a partition key, and will thus be automatically load-balanced (round-robin) across all the Event Hub partitions. + +## Metrics + +The plugin uses the Telegraf serializers to format the metric data sent in the message payloads. You can select any of the supported output formats, although JSON is probably the easiest to integrate with downstream components. + +## Configuration + +```toml +[[ outputs.event_hubs ]] +## The full connection string to the Event Hub (required) +## The shared access key must have "Send" permissions on the target Event Hub. +connection_string = "Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName" + +## Client timeout (defaults to 30s) +# timeout = "30s" + +## Data format to output. 
+## Each data format has its own unique set of configuration options, read +## more about them here: +## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +data_format = "json" +``` diff --git a/plugins/outputs/event_hubs/event_hubs.go b/plugins/outputs/event_hubs/event_hubs.go new file mode 100644 index 0000000000000..3c87a84fb62ce --- /dev/null +++ b/plugins/outputs/event_hubs/event_hubs.go @@ -0,0 +1,148 @@ +package event_hubs + +import ( + "context" + "time" + + eventhub "github.com/Azure/azure-event-hubs-go/v3" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/outputs" + "github.com/influxdata/telegraf/plugins/serializers" +) + +/* +** Wrapper interface for eventhub.Hub + */ + +type EventHubInterface interface { + GetHub(s string) error + Close(ctx context.Context) error + SendBatch(ctx context.Context, iterator eventhub.BatchIterator, opts ...eventhub.BatchOption) error +} + +type eventHub struct { + hub *eventhub.Hub +} + +func (eh *eventHub) GetHub(s string) error { + hub, err := eventhub.NewHubFromConnectionString(s) + + if err != nil { + return err + } + + eh.hub = hub + + return nil +} + +func (eh *eventHub) Close(ctx context.Context) error { + return eh.hub.Close(ctx) +} + +func (eh *eventHub) SendBatch(ctx context.Context, iterator eventhub.BatchIterator, opts ...eventhub.BatchOption) error { + return eh.hub.SendBatch(ctx, iterator, opts...) 
+} + +/* End wrapper interface */ + +type EventHubs struct { + Log telegraf.Logger `toml:"-"` + ConnectionString string `toml:"connection_string"` + Timeout config.Duration + + Hub EventHubInterface + serializer serializers.Serializer +} + +const ( + defaultRequestTimeout = time.Second * 30 +) + +func (e *EventHubs) Description() string { + return "Configuration for Event Hubs output plugin" +} + +func (e *EventHubs) SampleConfig() string { + return ` + ## The full connection string to the Event Hub (required) + ## The shared access key must have "Send" permissions on the target Event Hub. + connection_string = "Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName" + + ## Client timeout (defaults to 30s) + # timeout = "30s" + + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "json" +` +} + +func (e *EventHubs) Init() error { + err := e.Hub.GetHub(e.ConnectionString) + + if err != nil { + return err + } + + return nil +} + +func (e *EventHubs) Connect() error { + return nil +} + +func (e *EventHubs) Close() error { + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(e.Timeout)) + defer cancel() + + err := e.Hub.Close(ctx) + + if err != nil { + return err + } + + return nil +} + +func (e *EventHubs) SetSerializer(serializer serializers.Serializer) { + e.serializer = serializer +} + +func (e *EventHubs) Write(metrics []telegraf.Metric) error { + var events []*eventhub.Event + + for _, metric := range metrics { + payload, err := e.serializer.Serialize(metric) + + if err != nil { + e.Log.Debugf("Could not serialize metric: %v", err) + continue + } + + events = append(events, eventhub.NewEvent(payload)) + } + + ctx, cancel := context.WithTimeout(context.Background(), 
time.Duration(e.Timeout)) + defer cancel() + + err := e.Hub.SendBatch(ctx, eventhub.NewEventBatchIterator(events...)) + + if err != nil { + return err + } + + return nil +} + +func init() { + outputs.Add("event_hubs", func() telegraf.Output { + return &EventHubs{ + Hub: &eventHub{}, + Timeout: config.Duration(defaultRequestTimeout), + } + }) +} diff --git a/plugins/outputs/event_hubs/event_hubs_test.go b/plugins/outputs/event_hubs/event_hubs_test.go new file mode 100644 index 0000000000000..9b17aef605833 --- /dev/null +++ b/plugins/outputs/event_hubs/event_hubs_test.go @@ -0,0 +1,162 @@ +package event_hubs + +import ( + "context" + "fmt" + "math/rand" + "os" + "testing" + "time" + + eventhub "github.com/Azure/azure-event-hubs-go/v3" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/serializers/json" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +/* +** Wrapper interface mock for eventhub.Hub + */ + +type mockEventHub struct { + mock.Mock +} + +func (eh *mockEventHub) GetHub(s string) error { + args := eh.Called(s) + return args.Error(0) +} + +func (eh *mockEventHub) Close(ctx context.Context) error { + args := eh.Called(ctx) + return args.Error(0) +} + +func (eh *mockEventHub) SendBatch(ctx context.Context, iterator eventhub.BatchIterator, opts ...eventhub.BatchOption) error { + args := eh.Called(ctx, iterator, opts) + return args.Error(0) +} + +/* End wrapper interface */ + +func TestInitAndWrite(t *testing.T) { + serializer, _ := json.NewSerializer(time.Second, "") + mockHub := &mockEventHub{} + e := &EventHubs{ + Hub: mockHub, + ConnectionString: "mock", + Timeout: config.Duration(time.Second * 5), + serializer: serializer, + } + + mockHub.On("GetHub", mock.Anything).Return(nil).Once() + err := e.Init() + require.NoError(t, err) + mockHub.AssertExpectations(t) + + metrics := testutil.MockMetrics() + + mockHub.On("SendBatch", mock.Anything, 
mock.Anything, mock.Anything).Return(nil).Once() + err = e.Write(metrics) + require.NoError(t, err) + mockHub.AssertExpectations(t) +} + +/* +** Integration test (requires an Event Hubs instance) + */ + +func TestInitAndWriteIntegration(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("EVENTHUB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable EVENTHUB_CONNECTION_STRING") + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + defer cancel() + + // Create a new, empty Event Hub + // NB: for this to work, the connection string needs to grant "Manage" permissions on the root namespace + mHub, err := eventhub.NewHubManagerFromConnectionString(os.Getenv("EVENTHUB_CONNECTION_STRING")) + require.NoError(t, err) + + r := rand.New(rand.NewSource(time.Now().UnixNano())) + name := fmt.Sprintf("testmetrics%05d", r.Intn(10000)) + + entity, err := mHub.Put(ctx, name, eventhub.HubWithPartitionCount(1)) + require.NoError(t, err) + + // Delete the test hub + defer func() { + err := mHub.Delete(ctx, entity.Name) + require.NoError(t, err) + }() + + testHubCS := os.Getenv("EVENTHUB_CONNECTION_STRING") + ";EntityPath=" + entity.Name + + // Configure the plugin to target the newly created hub + serializer, _ := json.NewSerializer(time.Second, "") + + e := &EventHubs{ + Hub: &eventHub{}, + ConnectionString: testHubCS, + Timeout: config.Duration(time.Second * 5), + serializer: serializer, + } + + // Verify that we can connect to Event Hubs + err = e.Init() + require.NoError(t, err) + + // Verify that we can successfully write data to Event Hubs + metrics := testutil.MockMetrics() + err = e.Write(metrics) + require.NoError(t, err) + + /* + ** Verify we can read data back from the test hub + */ + + exit := make(chan string) + + // Create a hub client for receiving + hub, err := eventhub.NewHubFromConnectionString(testHubCS) + require.NoError(t, err) + + // The handler function will 
pass received messages via the channel + handler := func(ctx context.Context, event *eventhub.Event) error { + exit <- string(event.Data) + return nil + } + + // Set up the receivers + runtimeInfo, err := hub.GetRuntimeInformation(ctx) + require.NoError(t, err) + + for _, partitionID := range runtimeInfo.PartitionIDs { + _, err := hub.Receive(ctx, partitionID, handler, eventhub.ReceiveWithStartingOffset("-1")) + require.NoError(t, err) + } + + // Wait to receive the same number of messages sent, with timeout + received := 0 +wait: + for _, metric := range metrics { + select { + case m := <-exit: + t.Logf("Received for %s: %s", metric.Name(), m) + received = received + 1 + case <-time.After(10 * time.Second): + t.Logf("Timeout") + break wait + } + } + + // Make sure received == sent + require.Equal(t, received, len(metrics)) +} From e17561d13b136581380e9cbf3f7a30110df5a8b5 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Mon, 18 Oct 2021 09:28:46 -0500 Subject: [PATCH 143/176] fix: update golanci-lint to v1.42.1 (#9932) --- .github/workflows/golangci-lint.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index a219934109fd5..d4eac0d328059 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -19,7 +19,7 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v2 with: - version: v1.38 + version: v1.42.1 only-new-issues: true golangci-master: if: github.ref == 'refs/heads/master' @@ -30,6 +30,6 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v2 with: - version: v1.38 + version: v1.42.1 only-new-issues: true args: --issues-exit-code=0 From 2e230e6bf4b57f70ec95eabaa905042b8165a17d Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Mon, 18 Oct 2021 09:48:16 -0500 Subject: [PATCH 144/176] fix: Correct conversion of 
int with specific bit size (#9933) --- plugins/inputs/procstat/procstat.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 7b2ffba26b430..cb10d34d2a5a9 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -474,7 +474,7 @@ func (p *Procstat) simpleSystemdUnitPIDs() ([]PID, error) { if len(kv[1]) == 0 || bytes.Equal(kv[1], []byte("0")) { return nil, nil } - pid, err := strconv.Atoi(string(kv[1])) + pid, err := strconv.ParseInt(string(kv[1]), 10, 32) if err != nil { return nil, fmt.Errorf("invalid pid '%s'", kv[1]) } From d729c0a6e48218861aa5f7aba8f47d9ea76d7e3d Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Mon, 18 Oct 2021 11:52:04 -0700 Subject: [PATCH 145/176] docs: fix broken link (#9812) --- docs/developers/SAMPLE_CONFIG.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/developers/SAMPLE_CONFIG.md b/docs/developers/SAMPLE_CONFIG.md index f6202145d27b6..d0969212fecb2 100644 --- a/docs/developers/SAMPLE_CONFIG.md +++ b/docs/developers/SAMPLE_CONFIG.md @@ -65,10 +65,10 @@ parameters is closely related have a single description. # password = "" ``` -An parameters should usually be describable in a few sentences. If it takes +Parameters should usually be describable in a few sentences. If it takes much more than this, try to provide a shorter explanation and provide a more complex description in the Configuration section of the plugins -[README](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/EXAMPLE_README.md) +[README](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/example) Boolean parameters should be used judiciously. 
You should try to think of something better since they don't scale well, things are often not truly From 2e216825db7404eceb6d3066a3052e85befe07b5 Mon Sep 17 00:00:00 2001 From: Alexander Krantz Date: Mon, 18 Oct 2021 14:47:45 -0700 Subject: [PATCH 146/176] fix: decode Prometheus scrape path from Kuberentes labels (#9662) --- plugins/inputs/prometheus/kubernetes.go | 59 ++++++++++---------- plugins/inputs/prometheus/kubernetes_test.go | 42 ++++++++++---- 2 files changed, 61 insertions(+), 40 deletions(-) diff --git a/plugins/inputs/prometheus/kubernetes.go b/plugins/inputs/prometheus/kubernetes.go index 0f260acb48b60..9a4d6bd325c46 100644 --- a/plugins/inputs/prometheus/kubernetes.go +++ b/plugins/inputs/prometheus/kubernetes.go @@ -5,7 +5,6 @@ import ( "crypto/tls" "encoding/json" "fmt" - "log" "net" "net/http" "net/url" @@ -295,12 +294,15 @@ func registerPod(pod *corev1.Pod, p *Prometheus) { if p.kubernetesPods == nil { p.kubernetesPods = map[string]URLAndAddress{} } - targetURL := getScrapeURL(pod) - if targetURL == nil { + targetURL, err := getScrapeURL(pod) + if err != nil { + p.Log.Errorf("could not parse URL: %s", err) + return + } else if targetURL == nil { return } - log.Printf("D! [inputs.prometheus] will scrape metrics from %q", *targetURL) + p.Log.Debugf("will scrape metrics from %q", targetURL.String()) // add annotation as metrics tags tags := pod.Annotations if tags == nil { @@ -312,12 +314,7 @@ func registerPod(pod *corev1.Pod, p *Prometheus) { for k, v := range pod.Labels { tags[k] = v } - URL, err := url.Parse(*targetURL) - if err != nil { - log.Printf("E! 
[inputs.prometheus] could not parse URL %q: %s", *targetURL, err.Error()) - return - } - podURL := p.AddressToURL(URL, URL.Hostname()) + podURL := p.AddressToURL(targetURL, targetURL.Hostname()) // Locks earlier if using cAdvisor calls - makes a new list each time // rather than updating and removing from the same list @@ -327,22 +324,22 @@ func registerPod(pod *corev1.Pod, p *Prometheus) { } p.kubernetesPods[podURL.String()] = URLAndAddress{ URL: podURL, - Address: URL.Hostname(), - OriginalURL: URL, + Address: targetURL.Hostname(), + OriginalURL: targetURL, Tags: tags, } } -func getScrapeURL(pod *corev1.Pod) *string { +func getScrapeURL(pod *corev1.Pod) (*url.URL, error) { ip := pod.Status.PodIP if ip == "" { // return as if scrape was disabled, we will be notified again once the pod // has an IP - return nil + return nil, nil } scheme := pod.Annotations["prometheus.io/scheme"] - path := pod.Annotations["prometheus.io/path"] + pathAndQuery := pod.Annotations["prometheus.io/path"] port := pod.Annotations["prometheus.io/port"] if scheme == "" { @@ -351,34 +348,36 @@ func getScrapeURL(pod *corev1.Pod) *string { if port == "" { port = "9102" } - if path == "" { - path = "/metrics" + if pathAndQuery == "" { + pathAndQuery = "/metrics" } - u := &url.URL{ - Scheme: scheme, - Host: net.JoinHostPort(ip, port), - Path: path, + base, err := url.Parse(pathAndQuery) + if err != nil { + return nil, err } - x := u.String() + base.Scheme = scheme + base.Host = net.JoinHostPort(ip, port) - return &x + return base, nil } func unregisterPod(pod *corev1.Pod, p *Prometheus) { - url := getScrapeURL(pod) - if url == nil { + targetURL, err := getScrapeURL(pod) + if err != nil { + p.Log.Errorf("failed to parse url: %s", err) + return + } else if targetURL == nil { return } - log.Printf("D! 
[inputs.prometheus] registered a delete request for %q in namespace %q", - pod.Name, pod.Namespace) + p.Log.Debugf("registered a delete request for %q in namespace %q", pod.Name, pod.Namespace) p.lock.Lock() defer p.lock.Unlock() - if _, ok := p.kubernetesPods[*url]; ok { - delete(p.kubernetesPods, *url) - log.Printf("D! [inputs.prometheus] will stop scraping for %q", *url) + if _, ok := p.kubernetesPods[targetURL.String()]; ok { + delete(p.kubernetesPods, targetURL.String()) + p.Log.Debugf("will stop scraping for %q", targetURL.String()) } } diff --git a/plugins/inputs/prometheus/kubernetes_test.go b/plugins/inputs/prometheus/kubernetes_test.go index 72f995c3112c9..2f67607cd3cf3 100644 --- a/plugins/inputs/prometheus/kubernetes_test.go +++ b/plugins/inputs/prometheus/kubernetes_test.go @@ -15,7 +15,8 @@ import ( func TestScrapeURLNoAnnotations(t *testing.T) { p := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{}} p.Annotations = map[string]string{} - url := getScrapeURL(p) + url, err := getScrapeURL(p) + assert.NoError(t, err) assert.Nil(t, url) } @@ -23,36 +24,57 @@ func TestScrapeURLAnnotationsNoScrape(t *testing.T) { p := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{}} p.Name = "myPod" p.Annotations = map[string]string{"prometheus.io/scrape": "false"} - url := getScrapeURL(p) + url, err := getScrapeURL(p) + assert.NoError(t, err) assert.Nil(t, url) } func TestScrapeURLAnnotations(t *testing.T) { p := pod() p.Annotations = map[string]string{"prometheus.io/scrape": "true"} - url := getScrapeURL(p) - assert.Equal(t, "http://127.0.0.1:9102/metrics", *url) + url, err := getScrapeURL(p) + assert.NoError(t, err) + assert.Equal(t, "http://127.0.0.1:9102/metrics", url.String()) } func TestScrapeURLAnnotationsCustomPort(t *testing.T) { p := pod() p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/port": "9000"} - url := getScrapeURL(p) - assert.Equal(t, "http://127.0.0.1:9000/metrics", *url) + url, err := getScrapeURL(p) + assert.NoError(t, err) + 
assert.Equal(t, "http://127.0.0.1:9000/metrics", url.String()) } func TestScrapeURLAnnotationsCustomPath(t *testing.T) { p := pod() p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "mymetrics"} - url := getScrapeURL(p) - assert.Equal(t, "http://127.0.0.1:9102/mymetrics", *url) + url, err := getScrapeURL(p) + assert.NoError(t, err) + assert.Equal(t, "http://127.0.0.1:9102/mymetrics", url.String()) } func TestScrapeURLAnnotationsCustomPathWithSep(t *testing.T) { p := pod() p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "/mymetrics"} - url := getScrapeURL(p) - assert.Equal(t, "http://127.0.0.1:9102/mymetrics", *url) + url, err := getScrapeURL(p) + assert.NoError(t, err) + assert.Equal(t, "http://127.0.0.1:9102/mymetrics", url.String()) +} + +func TestScrapeURLAnnotationsCustomPathWithQueryParameters(t *testing.T) { + p := pod() + p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "/v1/agent/metrics?format=prometheus"} + url, err := getScrapeURL(p) + assert.NoError(t, err) + assert.Equal(t, "http://127.0.0.1:9102/v1/agent/metrics?format=prometheus", url.String()) +} + +func TestScrapeURLAnnotationsCustomPathWithFragment(t *testing.T) { + p := pod() + p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "/v1/agent/metrics#prometheus"} + url, err := getScrapeURL(p) + assert.NoError(t, err) + assert.Equal(t, "http://127.0.0.1:9102/v1/agent/metrics#prometheus", url.String()) } func TestAddPod(t *testing.T) { From 4d254bde1deb75d66881aa8bbc9bc8cbb6bd8c2b Mon Sep 17 00:00:00 2001 From: Mya Date: Mon, 18 Oct 2021 17:25:03 -0600 Subject: [PATCH 147/176] chore: update readme go version from 1.14 to 1.17 (#9944) --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 9c75311e4e2cb..c7c846d2b3e75 100644 --- a/README.md +++ b/README.md @@ -42,9 +42,9 @@ Ansible role: 
https://github.com/rossmcdonald/telegraf ### From Source: -Telegraf requires Go version 1.14 or newer, the Makefile requires GNU make. +Telegraf requires Go version 1.17 or newer, the Makefile requires GNU make. -1. [Install Go](https://golang.org/doc/install) >=1.14 (1.15 recommended) +1. [Install Go](https://golang.org/doc/install) >=1.17 (1.17.2 recommended) 2. Clone the Telegraf repository: ``` cd ~/src From 81cc2ef197a057f448c46c7aa2579b2186c63020 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pavel=20Z=C3=A1vora?= Date: Tue, 19 Oct 2021 15:40:12 +0200 Subject: [PATCH 148/176] chore: reference db2 external plugin (#9952) --- EXTERNAL_PLUGINS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/EXTERNAL_PLUGINS.md b/EXTERNAL_PLUGINS.md index 7ba0166a9bcc6..0de5ae47949d9 100644 --- a/EXTERNAL_PLUGINS.md +++ b/EXTERNAL_PLUGINS.md @@ -24,6 +24,7 @@ Pull requests welcome. - [net_irtt](https://github.com/iAnatoly/telegraf-input-net_irtt) - Gather information from IRTT network test - [dht_sensor](https://github.com/iAnatoly/telegraf-input-dht_sensor) - Gather temperature and humidity from DHTXX sensors - [oracle](https://github.com/bonitoo-io/telegraf-input-oracle) - Gather the statistic data from Oracle RDBMS +- [db2](https://github.com/bonitoo-io/telegraf-input-db2) - Gather the statistic data from DB2 RDBMS ## Outputs - [kinesis](https://github.com/morfien101/telegraf-output-kinesis) - Aggregation and compression of metrics to send Amazon Kinesis. 
From 62d5f635bbf68582150f7d4f27376ecaac61653a Mon Sep 17 00:00:00 2001 From: Sanyam Arya Date: Tue, 19 Oct 2021 18:42:15 +0200 Subject: [PATCH 149/176] fix: internet_speed input plugin not collecting/reporting latency (#9957) --- plugins/inputs/internet_speed/internet_speed.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/internet_speed/internet_speed.go b/plugins/inputs/internet_speed/internet_speed.go index cf0c5cfb13117..58fb29c5949c1 100644 --- a/plugins/inputs/internet_speed/internet_speed.go +++ b/plugins/inputs/internet_speed/internet_speed.go @@ -2,6 +2,7 @@ package internet_speed import ( "fmt" + "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" @@ -68,7 +69,7 @@ func (is *InternetSpeed) Gather(acc telegraf.Accumulator) error { fields := make(map[string]interface{}) fields["download"] = s.DLSpeed fields["upload"] = s.ULSpeed - fields["latency"] = s.Latency + fields["latency"] = timeDurationMillisecondToFloat64(s.Latency) tags := make(map[string]string) @@ -80,3 +81,7 @@ func init() { return &InternetSpeed{} }) } + +func timeDurationMillisecondToFloat64(d time.Duration) float64 { + return float64(d) / float64(time.Millisecond) +} From 47301e6ef4e28e231b360afca95b3950d8474de7 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 19 Oct 2021 14:21:43 -0500 Subject: [PATCH 150/176] chore: remove empty build.py (#9958) --- .lgtm.yml | 2 -- scripts/build.py | 0 2 files changed, 2 deletions(-) delete mode 100644 .lgtm.yml delete mode 100644 scripts/build.py diff --git a/.lgtm.yml b/.lgtm.yml deleted file mode 100644 index 5b0b2e3367a14..0000000000000 --- a/.lgtm.yml +++ /dev/null @@ -1,2 +0,0 @@ -queries: - - exclude: go/disabled-certificate-check diff --git a/scripts/build.py b/scripts/build.py deleted file mode 100644 index e69de29bb2d1d..0000000000000 From 3e1ebdb4c7d6cf24e46bb84c57439c05530ead21 Mon Sep 17 00:00:00 2001 From: Joshua 
Powers Date: Tue, 19 Oct 2021 14:44:36 -0600 Subject: [PATCH 151/176] fix: add normalization of tags for ethtool input plugin (#9901) --- plugins/inputs/ethtool/README.md | 9 ++ plugins/inputs/ethtool/ethtool.go | 12 +++ plugins/inputs/ethtool/ethtool_linux.go | 45 ++++++++- plugins/inputs/ethtool/ethtool_test.go | 116 ++++++++++++++++++++++++ 4 files changed, 181 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/ethtool/README.md b/plugins/inputs/ethtool/README.md index 1b36001d9b74c..333630c958703 100644 --- a/plugins/inputs/ethtool/README.md +++ b/plugins/inputs/ethtool/README.md @@ -12,6 +12,15 @@ The ethtool input plugin pulls ethernet device stats. Fields pulled will depend ## List of interfaces to ignore when pulling metrics. # interface_exclude = ["eth1"] + + ## Some drivers declare statistics with extra whitespace, different spacing, + ## and mix cases. This list, when enabled, can be used to clean the keys. + ## Here are the current possible normalizations: + ## * snakecase: converts fooBarBaz to foo_bar_baz + ## * trim: removes leading and trailing whitespace + ## * lower: changes all capitalized letters to lowercase + ## * underscore: replaces spaces with underscores + # normalize_keys = ["snakecase", "trim", "lower", "underscore"] ``` Interfaces can be included or ignored using: diff --git a/plugins/inputs/ethtool/ethtool.go b/plugins/inputs/ethtool/ethtool.go index 0978bef837383..256652640f383 100644 --- a/plugins/inputs/ethtool/ethtool.go +++ b/plugins/inputs/ethtool/ethtool.go @@ -20,6 +20,9 @@ type Ethtool struct { // This is the list of interface names to ignore InterfaceExclude []string `toml:"interface_exclude"` + // Normalization on the key names + NormalizeKeys []string `toml:"normalize_keys"` + Log telegraf.Logger `toml:"-"` // the ethtool command @@ -38,6 +41,15 @@ const ( ## List of interfaces to ignore when pulling metrics. 
# interface_exclude = ["eth1"] + + ## Some drivers declare statistics with extra whitespace, different spacing, + ## and mix cases. This list, when enabled, can be used to clean the keys. + ## Here are the current possible normalizations: + ## * snakecase: converts fooBarBaz to foo_bar_baz + ## * trim: removes leading and trailing whitespace + ## * lower: changes all capitalized letters to lowercase + ## * underscore: replaces spaces with underscores + # normalize_keys = ["snakecase", "trim", "lower", "underscore"] ` ) diff --git a/plugins/inputs/ethtool/ethtool_linux.go b/plugins/inputs/ethtool/ethtool_linux.go index 6c0116e6e8089..16081e4cd831a 100644 --- a/plugins/inputs/ethtool/ethtool_linux.go +++ b/plugins/inputs/ethtool/ethtool_linux.go @@ -5,6 +5,8 @@ package ethtool import ( "net" + "regexp" + "strings" "sync" "github.com/pkg/errors" @@ -81,12 +83,53 @@ func (e *Ethtool) gatherEthtoolStats(iface net.Interface, acc telegraf.Accumulat fields[fieldInterfaceUp] = e.interfaceUp(iface) for k, v := range stats { - fields[k] = v + fields[e.normalizeKey(k)] = v } acc.AddFields(pluginName, fields, tags) } +// normalize key string; order matters to avoid replacing whitespace with +// underscores, then trying to trim those same underscores. Likewise with +// camelcase before trying to lower case things. 
+func (e *Ethtool) normalizeKey(key string) string { + // must trim whitespace or this will have a leading _ + if inStringSlice(e.NormalizeKeys, "snakecase") { + key = camelCase2SnakeCase(strings.TrimSpace(key)) + } + // must occur before underscore, otherwise nothing to trim + if inStringSlice(e.NormalizeKeys, "trim") { + key = strings.TrimSpace(key) + } + if inStringSlice(e.NormalizeKeys, "lower") { + key = strings.ToLower(key) + } + if inStringSlice(e.NormalizeKeys, "underscore") { + key = strings.ReplaceAll(key, " ", "_") + } + + return key +} + +func camelCase2SnakeCase(value string) string { + matchFirstCap := regexp.MustCompile("(.)([A-Z][a-z]+)") + matchAllCap := regexp.MustCompile("([a-z0-9])([A-Z])") + + snake := matchFirstCap.ReplaceAllString(value, "${1}_${2}") + snake = matchAllCap.ReplaceAllString(snake, "${1}_${2}") + return strings.ToLower(snake) +} + +func inStringSlice(slice []string, value string) bool { + for _, item := range slice { + if item == value { + return true + } + } + + return false +} + func (e *Ethtool) interfaceUp(iface net.Interface) bool { return (iface.Flags & net.FlagUp) != 0 } diff --git a/plugins/inputs/ethtool/ethtool_test.go b/plugins/inputs/ethtool/ethtool_test.go index 14cf14d811683..f9573ee054429 100644 --- a/plugins/inputs/ethtool/ethtool_test.go +++ b/plugins/inputs/ethtool/ethtool_test.go @@ -380,3 +380,119 @@ func TestGatherIgnoreInterfaces(t *testing.T) { } acc.AssertContainsTaggedFields(t, pluginName, expectedFieldsEth2, expectedTagsEth2) } + +type TestCase struct { + normalization []string + stats map[string]uint64 + expectedFields map[string]uint64 +} + +func TestNormalizedKeys(t *testing.T) { + cases := []TestCase{ + { + normalization: []string{"underscore"}, + stats: map[string]uint64{ + "port rx": 1, + " Port_tx": 0, + "interface_up": 0, + }, + expectedFields: map[string]uint64{ + "port_rx": 1, + "_Port_tx": 0, + "interface_up": 0, + }, + }, + { + normalization: []string{"underscore", "lower"}, + stats: 
map[string]uint64{ + "Port rx": 1, + " Port_tx": 0, + "interface_up": 0, + }, + expectedFields: map[string]uint64{ + "port_rx": 1, + "_port_tx": 0, + "interface_up": 0, + }, + }, + { + normalization: []string{"underscore", "lower", "trim"}, + stats: map[string]uint64{ + " Port RX ": 1, + " Port_tx": 0, + "interface_up": 0, + }, + expectedFields: map[string]uint64{ + "port_rx": 1, + "port_tx": 0, + "interface_up": 0, + }, + }, + { + normalization: []string{"underscore", "lower", "snakecase", "trim"}, + stats: map[string]uint64{ + " Port RX ": 1, + " Port_tx": 0, + "interface_up": 0, + }, + expectedFields: map[string]uint64{ + "port_rx": 1, + "port_tx": 0, + "interface_up": 0, + }, + }, + { + normalization: []string{"snakecase"}, + stats: map[string]uint64{ + " PortRX ": 1, + " PortTX": 0, + "interface_up": 0, + }, + expectedFields: map[string]uint64{ + "port_rx": 1, + "port_tx": 0, + "interface_up": 0, + }, + }, + { + normalization: []string{}, + stats: map[string]uint64{ + " Port RX ": 1, + " Port_tx": 0, + "interface_up": 0, + }, + expectedFields: map[string]uint64{ + " Port RX ": 1, + " Port_tx": 0, + "interface_up": 0, + }, + }, + } + for _, c := range cases { + eth0 := &InterfaceMock{"eth0", "e1000e", c.stats, false, true} + expectedTags := map[string]string{ + "interface": eth0.Name, + "driver": eth0.DriverName, + } + + interfaceMap = make(map[string]*InterfaceMock) + interfaceMap[eth0.Name] = eth0 + + cmd := &CommandEthtoolMock{interfaceMap} + command = &Ethtool{ + InterfaceInclude: []string{}, + InterfaceExclude: []string{}, + NormalizeKeys: c.normalization, + command: cmd, + } + + var acc testutil.Accumulator + err := command.Gather(&acc) + + assert.NoError(t, err) + assert.Len(t, acc.Metrics, 1) + + acc.AssertContainsFields(t, pluginName, toStringMapInterface(c.expectedFields)) + acc.AssertContainsTaggedFields(t, pluginName, toStringMapInterface(c.expectedFields), expectedTags) + } +} From a7582fb8932815f0d0f6e520696f9c18e8d9ae1a Mon Sep 17 00:00:00 2001 
From: Aleksandr Venger Date: Wed, 20 Oct 2021 00:09:37 +0300 Subject: [PATCH 152/176] fix: segfault in ingress, persistentvolumeclaim, statefulset in kube_inventory (#9585) --- plugins/inputs/kube_inventory/ingress.go | 10 +- plugins/inputs/kube_inventory/ingress_test.go | 108 ++++++++++++++++++ .../kube_inventory/persistentvolumeclaim.go | 10 +- .../persistentvolumeclaim_test.go | 52 +++++++++ plugins/inputs/kube_inventory/statefulset.go | 12 +- .../inputs/kube_inventory/statefulset_test.go | 108 ++++++++++++++++++ 6 files changed, 290 insertions(+), 10 deletions(-) diff --git a/plugins/inputs/kube_inventory/ingress.go b/plugins/inputs/kube_inventory/ingress.go index 69765b4dd3257..6fd424dc0aef5 100644 --- a/plugins/inputs/kube_inventory/ingress.go +++ b/plugins/inputs/kube_inventory/ingress.go @@ -39,11 +39,17 @@ func (ki *KubernetesInventory) gatherIngress(i netv1.Ingress, acc telegraf.Accum tags["ip"] = ingress.IP for _, rule := range i.Spec.Rules { + if rule.IngressRuleValue.HTTP == nil { + continue + } for _, path := range rule.IngressRuleValue.HTTP.Paths { - fields["backend_service_port"] = path.Backend.Service.Port.Number + if path.Backend.Service != nil { + tags["backend_service_name"] = path.Backend.Service.Name + fields["backend_service_port"] = path.Backend.Service.Port.Number + } + fields["tls"] = i.Spec.TLS != nil - tags["backend_service_name"] = path.Backend.Service.Name tags["path"] = path.Path tags["host"] = rule.Host diff --git a/plugins/inputs/kube_inventory/ingress_test.go b/plugins/inputs/kube_inventory/ingress_test.go index cd2af76d34045..77ceceaac22ba 100644 --- a/plugins/inputs/kube_inventory/ingress_test.go +++ b/plugins/inputs/kube_inventory/ingress_test.go @@ -109,6 +109,114 @@ func TestIngress(t *testing.T) { }, hasError: false, }, + { + name: "no HTTPIngressRuleValue", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/ingress/": netv1.IngressList{ + Items: []netv1.Ingress{ + { + Status: netv1.IngressStatus{ + 
LoadBalancer: v1.LoadBalancerStatus{ + Ingress: []v1.LoadBalancerIngress{ + { + Hostname: "chron-1", + IP: "1.0.0.127", + }, + }, + }, + }, + Spec: netv1.IngressSpec{ + Rules: []netv1.IngressRule{ + { + Host: "ui.internal", + IngressRuleValue: netv1.IngressRuleValue{ + HTTP: nil, + }, + }, + }, + }, + ObjectMeta: metav1.ObjectMeta{ + Generation: 12, + Namespace: "ns1", + Name: "ui-lb", + CreationTimestamp: metav1.Time{Time: now}, + }, + }, + }, + }, + }, + }, + hasError: false, + }, + { + name: "no IngressServiceBackend", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/ingress/": netv1.IngressList{ + Items: []netv1.Ingress{ + { + Status: netv1.IngressStatus{ + LoadBalancer: v1.LoadBalancerStatus{ + Ingress: []v1.LoadBalancerIngress{ + { + Hostname: "chron-1", + IP: "1.0.0.127", + }, + }, + }, + }, + Spec: netv1.IngressSpec{ + Rules: []netv1.IngressRule{ + { + Host: "ui.internal", + IngressRuleValue: netv1.IngressRuleValue{ + HTTP: &netv1.HTTPIngressRuleValue{ + Paths: []netv1.HTTPIngressPath{ + { + Path: "/", + Backend: netv1.IngressBackend{ + Service: nil, + }, + }, + }, + }, + }, + }, + }, + }, + ObjectMeta: metav1.ObjectMeta{ + Generation: 12, + Namespace: "ns1", + Name: "ui-lb", + CreationTimestamp: metav1.Time{Time: now}, + }, + }, + }, + }, + }, + }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_ingress", + map[string]string{ + "ingress_name": "ui-lb", + "namespace": "ns1", + "ip": "1.0.0.127", + "hostname": "chron-1", + "host": "ui.internal", + "path": "/", + }, + map[string]interface{}{ + "tls": false, + "generation": int64(12), + "created": now.UnixNano(), + }, + time.Unix(0, 0), + ), + }, + hasError: false, + }, } for _, v := range tests { diff --git a/plugins/inputs/kube_inventory/persistentvolumeclaim.go b/plugins/inputs/kube_inventory/persistentvolumeclaim.go index a5d30d6dca6f4..2b06cce6b9fbb 100644 --- a/plugins/inputs/kube_inventory/persistentvolumeclaim.go +++ 
b/plugins/inputs/kube_inventory/persistentvolumeclaim.go @@ -34,10 +34,12 @@ func (ki *KubernetesInventory) gatherPersistentVolumeClaim(pvc corev1.Persistent "phase_type": phaseType, } tags := map[string]string{ - "pvc_name": pvc.Name, - "namespace": pvc.Namespace, - "phase": string(pvc.Status.Phase), - "storageclass": *pvc.Spec.StorageClassName, + "pvc_name": pvc.Name, + "namespace": pvc.Namespace, + "phase": string(pvc.Status.Phase), + } + if pvc.Spec.StorageClassName != nil { + tags["storageclass"] = *pvc.Spec.StorageClassName } if pvc.Spec.Selector != nil { for key, val := range pvc.Spec.Selector.MatchLabels { diff --git a/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go b/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go index b4e468acd71e7..00da84f9f757a 100644 --- a/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go +++ b/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go @@ -134,6 +134,58 @@ func TestPersistentVolumeClaim(t *testing.T) { ), }, }, + { + name: "no storage class name", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/persistentvolumeclaims/": &corev1.PersistentVolumeClaimList{ + Items: []corev1.PersistentVolumeClaim{ + { + Status: corev1.PersistentVolumeClaimStatus{ + Phase: "bound", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + VolumeName: "pvc-dc870fd6-1e08-11e8-b226-02aa4bc06eb8", + StorageClassName: nil, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "select1": "s1", + "select2": "s2", + }, + }, + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns1", + Name: "pc1", + Labels: map[string]string{ + "lab1": "v1", + "lab2": "v2", + }, + CreationTimestamp: metav1.Time{Time: now}, + }, + }, + }, + }, + }, + }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_persistentvolumeclaim", + map[string]string{ + "pvc_name": "pc1", + "namespace": "ns1", + "phase": "bound", + "selector_select1": "s1", + "selector_select2": "s2", + }, + 
map[string]interface{}{ + "phase_type": 0, + }, + time.Unix(0, 0), + ), + }, + hasError: false, + }, } for _, v := range tests { diff --git a/plugins/inputs/kube_inventory/statefulset.go b/plugins/inputs/kube_inventory/statefulset.go index 22b235116b22c..06335fc612a1e 100644 --- a/plugins/inputs/kube_inventory/statefulset.go +++ b/plugins/inputs/kube_inventory/statefulset.go @@ -28,16 +28,20 @@ func (ki *KubernetesInventory) gatherStatefulSet(s v1.StatefulSet, acc telegraf. "replicas_current": status.CurrentReplicas, "replicas_ready": status.ReadyReplicas, "replicas_updated": status.UpdatedReplicas, - "spec_replicas": *s.Spec.Replicas, "observed_generation": s.Status.ObservedGeneration, } + if s.Spec.Replicas != nil { + fields["spec_replicas"] = *s.Spec.Replicas + } tags := map[string]string{ "statefulset_name": s.Name, "namespace": s.Namespace, } - for key, val := range s.Spec.Selector.MatchLabels { - if ki.selectorFilter.Match(key) { - tags["selector_"+key] = val + if s.Spec.Selector != nil { + for key, val := range s.Spec.Selector.MatchLabels { + if ki.selectorFilter.Match(key) { + tags["selector_"+key] = val + } } } diff --git a/plugins/inputs/kube_inventory/statefulset_test.go b/plugins/inputs/kube_inventory/statefulset_test.go index cbbc453f58f35..6f30acc8b7435 100644 --- a/plugins/inputs/kube_inventory/statefulset_test.go +++ b/plugins/inputs/kube_inventory/statefulset_test.go @@ -92,6 +92,114 @@ func TestStatefulSet(t *testing.T) { }, hasError: false, }, + { + name: "no label selector", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/statefulsets/": &v1.StatefulSetList{ + Items: []v1.StatefulSet{ + { + Status: v1.StatefulSetStatus{ + Replicas: 2, + CurrentReplicas: 4, + ReadyReplicas: 1, + UpdatedReplicas: 3, + ObservedGeneration: 119, + }, + Spec: v1.StatefulSetSpec{ + Replicas: toInt32Ptr(3), + Selector: nil, + }, + ObjectMeta: metav1.ObjectMeta{ + Generation: 332, + Namespace: "ns1", + Name: "sts1", + CreationTimestamp: 
metav1.Time{Time: now}, + }, + }, + }, + }, + }, + }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_statefulset", + map[string]string{ + "namespace": "ns1", + "statefulset_name": "sts1", + }, + map[string]interface{}{ + "generation": int64(332), + "observed_generation": int64(119), + "created": now.UnixNano(), + "spec_replicas": int32(3), + "replicas": int32(2), + "replicas_current": int32(4), + "replicas_ready": int32(1), + "replicas_updated": int32(3), + }, + time.Unix(0, 0), + ), + }, + hasError: false, + }, + { + name: "no desired number of replicas", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/statefulsets/": &v1.StatefulSetList{ + Items: []v1.StatefulSet{ + { + Status: v1.StatefulSetStatus{ + Replicas: 2, + CurrentReplicas: 4, + ReadyReplicas: 1, + UpdatedReplicas: 3, + ObservedGeneration: 119, + }, + Spec: v1.StatefulSetSpec{ + Replicas: nil, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "select1": "s1", + "select2": "s2", + }, + }, + }, + ObjectMeta: metav1.ObjectMeta{ + Generation: 332, + Namespace: "ns1", + Name: "sts1", + CreationTimestamp: metav1.Time{Time: now}, + }, + }, + }, + }, + }, + }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_statefulset", + map[string]string{ + "namespace": "ns1", + "statefulset_name": "sts1", + "selector_select1": "s1", + "selector_select2": "s2", + }, + map[string]interface{}{ + "generation": int64(332), + "observed_generation": int64(119), + "created": now.UnixNano(), + "replicas": int32(2), + "replicas_current": int32(4), + "replicas_ready": int32(1), + "replicas_updated": int32(3), + }, + time.Unix(0, 0), + ), + }, + hasError: false, + }, } for _, v := range tests { From cf605b5d9aaac7a6a1098d3b125757f19e1b9fa2 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Tue, 19 Oct 2021 23:12:13 +0200 Subject: [PATCH 153/176] feat: Modbus connection settings (serial) (#9256) --- 
plugins/inputs/modbus/README.md | 32 +++++++++++- plugins/inputs/modbus/modbus.go | 88 ++++++++++++++++++++++++++++----- 2 files changed, 105 insertions(+), 15 deletions(-) diff --git a/plugins/inputs/modbus/README.md b/plugins/inputs/modbus/README.md index 9f4cf5e37487c..ac01e140b695c 100644 --- a/plugins/inputs/modbus/README.md +++ b/plugins/inputs/modbus/README.md @@ -29,7 +29,7 @@ Registers via Modbus TCP or Modbus RTU/ASCII. # TCP - connect via Modbus/TCP controller = "tcp://localhost:502" - + ## Serial (RS485; RS232) # controller = "file:///dev/ttyUSB0" # baud_rate = 9600 @@ -42,6 +42,10 @@ Registers via Modbus TCP or Modbus RTU/ASCII. ## For Serial you can choose between "RTU" and "ASCII" # transmission_mode = "RTU" + ## Trace the connection to the modbus device as debug messages + ## Note: You have to enable telegraf's debug mode to see those messages! + # debug_connection = false + ## Measurements ## @@ -88,8 +92,22 @@ Registers via Modbus TCP or Modbus RTU/ASCII. { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]}, { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]}, ] + + ## Enable workarounds required by some devices to work correctly + # [inputs.modbus.workarounds] + ## Pause between read requests sent to the device. This might be necessary for (slow) serial devices. + # pause_between_requests = "0ms" + ## Close the connection after every gather cycle. Usually the plugin closes the connection after a certain + ## idle-timeout, however, if you query a device with limited simultaneous connectivity (e.g. serial devices) + ## from multiple instances you might want to only stay connected during gather and disconnect afterwards. + # close_connection_after_gather = false ``` +### Notes +You can debug Modbus connection issues by enabling `debug_connection`. To see those debug messages Telegraf has to be started with debugging enabled (i.e. with `--debug` option). 
Please be aware that connection tracing will produce a lot of messages and should **NOT** be used in production environments. + +Please use `pause_between_requests` with care. Especially make sure that the total gather time, including the pause(s), does not exceed the configured collection interval. Note, that pauses add up if multiple requests are sent! + ### Metrics Metric are custom and configured using the `discrete_inputs`, `coils`, @@ -131,6 +149,8 @@ with N decimal places'. from unsigned values). ### Trouble shooting + +#### Strange data Modbus documentations are often a mess. People confuse memory-address (starts at one) and register address (starts at zero) or stay unclear about the used word-order. Furthermore, there are some non-standard implementations that also swap the bytes within the register word (16-bit). @@ -142,7 +162,15 @@ In case you get an `exception '2' (illegal data address)` error you might try to In case you see strange values, the `byte_order` might be off. You can either probe all combinations (`ABCD`, `CDBA`, `BADC` or `DCBA`) or you set `byte_order="ABCD" data_type="UINT32"` and use the resulting value(s) in an online converter like [this](https://www.scadacore.com/tools/programming-calculators/online-hex-converter/). This makes especially sense if you don't want to mess with the device, deal with 64-bit values and/or don't know the `data_type` of your register (e.g. fix-point floating values vs. IEEE floating point). -If nothing helps, please post your configuration, error message and/or the output of `byte_order="ABCD" data_type="UINT32"` to one of the telegraf support channels (forum, slack or as issue). +If your data still looks corrupted, please post your configuration, error message and/or the output of `byte_order="ABCD" data_type="UINT32"` to one of the telegraf support channels (forum, slack or as issue). + +#### Workarounds +Some Modbus devices need special read characteristics when reading data and will fail otherwise. 
For example, there are certain serial devices that need a certain pause between register read requests. Others might only offer a limited number of simultaneously connected devices, like serial devices or some ModbusTCP devices. In case you need to access those devices in parallel you might want to disconnect immediately after the plugin finished reading. + +To allow this plugin to also handle those "special" devices there is the `workarounds` configuration options. In case your documentation states certain read requirements or you get read timeouts or other read errors you might want to try one or more workaround options. +If you find that other/more workarounds are required for your device, please let us know. + +In case your device needs a workaround that is not yet implemented, please open an issue or submit a pull-request. ### Example Output diff --git a/plugins/inputs/modbus/modbus.go b/plugins/inputs/modbus/modbus.go index 18a00e990dc66..c5dfee2f6cbe6 100644 --- a/plugins/inputs/modbus/modbus.go +++ b/plugins/inputs/modbus/modbus.go @@ -15,19 +15,26 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +type ModbusWorkarounds struct { + PollPause config.Duration `toml:"pause_between_requests"` + CloseAfterGather bool `toml:"close_connection_after_gather"` +} + // Modbus holds all data relevant to the plugin type Modbus struct { - Name string `toml:"name"` - Controller string `toml:"controller"` - TransmissionMode string `toml:"transmission_mode"` - BaudRate int `toml:"baud_rate"` - DataBits int `toml:"data_bits"` - Parity string `toml:"parity"` - StopBits int `toml:"stop_bits"` - Timeout config.Duration `toml:"timeout"` - Retries int `toml:"busy_retries"` - RetriesWaitTime config.Duration `toml:"busy_retries_wait"` - Log telegraf.Logger `toml:"-"` + Name string `toml:"name"` + Controller string `toml:"controller"` + TransmissionMode string `toml:"transmission_mode"` + BaudRate int `toml:"baud_rate"` + DataBits int `toml:"data_bits"` + Parity string 
`toml:"parity"` + StopBits int `toml:"stop_bits"` + Timeout config.Duration `toml:"timeout"` + Retries int `toml:"busy_retries"` + RetriesWaitTime config.Duration `toml:"busy_retries_wait"` + DebugConnection bool `toml:"debug_connection"` + Workarounds ModbusWorkarounds `toml:"workarounds"` + Log telegraf.Logger `toml:"-"` // Register configuration ConfigurationOriginal // Connection handling @@ -88,19 +95,24 @@ const sampleConfig = ` # TCP - connect via Modbus/TCP controller = "tcp://localhost:502" - + ## Serial (RS485; RS232) # controller = "file:///dev/ttyUSB0" # baud_rate = 9600 # data_bits = 8 # parity = "N" # stop_bits = 1 + # transmission_mode = "RTU" + + ## Trace the connection to the modbus device as debug messages + ## Note: You have to enable telegraf's debug mode to see those messages! + # debug_connection = false ## For Modbus over TCP you can choose between "TCP", "RTUoverTCP" and "ASCIIoverTCP" ## default behaviour is "TCP" if the controller is TCP ## For Serial you can choose between "RTU" and "ASCII" # transmission_mode = "RTU" - + ## Measurements ## @@ -148,6 +160,15 @@ const sampleConfig = ` { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]}, { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]}, ] + + ## Enable workarounds required by some devices to work correctly + # [inputs.modbus.workarounds] + ## Pause between read requests sent to the device. This might be necessary for (slow) serial devices. + # pause_between_requests = "0ms" + ## Close the connection after every gather cycle. Usually the plugin closes the connection after a certain + ## idle-timeout, however, if you query a device with limited simultaneous connectivity (e.g. serial devices) + ## from multiple instances you might want to only stay connected during gather and disconnect afterwards. 
+ # close_connection_after_gather = false ` // SampleConfig returns a basic configuration for the plugin @@ -234,6 +255,11 @@ func (m *Modbus) Gather(acc telegraf.Accumulator) error { m.collectFields(acc, timestamp, tags, requests.input) } + // Disconnect after read if configured + if m.Workarounds.CloseAfterGather { + return m.disconnect() + } + return nil } @@ -253,14 +279,23 @@ func (m *Modbus) initClient() error { case "RTUoverTCP": handler := mb.NewRTUOverTCPClientHandler(host + ":" + port) handler.Timeout = time.Duration(m.Timeout) + if m.DebugConnection { + handler.Logger = m + } m.handler = handler case "ASCIIoverTCP": handler := mb.NewASCIIOverTCPClientHandler(host + ":" + port) handler.Timeout = time.Duration(m.Timeout) + if m.DebugConnection { + handler.Logger = m + } m.handler = handler default: handler := mb.NewTCPClientHandler(host + ":" + port) handler.Timeout = time.Duration(m.Timeout) + if m.DebugConnection { + handler.Logger = m + } m.handler = handler } case "file": @@ -272,6 +307,9 @@ func (m *Modbus) initClient() error { handler.DataBits = m.DataBits handler.Parity = m.Parity handler.StopBits = m.StopBits + if m.DebugConnection { + handler.Logger = m + } m.handler = handler case "ASCII": handler := mb.NewASCIIClientHandler(u.Path) @@ -280,6 +318,9 @@ func (m *Modbus) initClient() error { handler.DataBits = m.DataBits handler.Parity = m.Parity handler.StopBits = m.StopBits + if m.DebugConnection { + handler.Logger = m + } m.handler = handler default: return fmt.Errorf("invalid protocol '%s' - '%s' ", u.Scheme, m.TransmissionMode) @@ -334,6 +375,7 @@ func (m *Modbus) gatherRequestsCoil(requests []request) error { if err != nil { return err } + nextRequest := time.Now().Add(time.Duration(m.Workarounds.PollPause)) m.Log.Debugf("got coil@%v[%v]: %v", request.address, request.length, bytes) // Bit value handling @@ -345,6 +387,9 @@ func (m *Modbus) gatherRequestsCoil(requests []request) error { request.fields[i].value = uint16((bytes[idx] >> bit) & 
0x01) m.Log.Debugf(" field %s with bit %d @ byte %d: %v --> %v", field.name, bit, idx, (bytes[idx]>>bit)&0x01, request.fields[i].value) } + + // Some (serial) devices require a pause between requests... + time.Sleep(time.Until(nextRequest)) } return nil } @@ -356,6 +401,7 @@ func (m *Modbus) gatherRequestsDiscrete(requests []request) error { if err != nil { return err } + nextRequest := time.Now().Add(time.Duration(m.Workarounds.PollPause)) m.Log.Debugf("got discrete@%v[%v]: %v", request.address, request.length, bytes) // Bit value handling @@ -367,6 +413,9 @@ func (m *Modbus) gatherRequestsDiscrete(requests []request) error { request.fields[i].value = uint16((bytes[idx] >> bit) & 0x01) m.Log.Debugf(" field %s with bit %d @ byte %d: %v --> %v", field.name, bit, idx, (bytes[idx]>>bit)&0x01, request.fields[i].value) } + + // Some (serial) devices require a pause between requests... + time.Sleep(time.Until(nextRequest)) } return nil } @@ -378,6 +427,7 @@ func (m *Modbus) gatherRequestsHolding(requests []request) error { if err != nil { return err } + nextRequest := time.Now().Add(time.Duration(m.Workarounds.PollPause)) m.Log.Debugf("got holding@%v[%v]: %v", request.address, request.length, bytes) // Non-bit value handling @@ -390,6 +440,9 @@ func (m *Modbus) gatherRequestsHolding(requests []request) error { request.fields[i].value = field.converter(bytes[offset : offset+length]) m.Log.Debugf(" field %s with offset %d with len %d: %v --> %v", field.name, offset, length, bytes[offset:offset+length], request.fields[i].value) } + + // Some (serial) devices require a pause between requests... 
+ time.Sleep(time.Until(nextRequest)) } return nil } @@ -401,6 +454,7 @@ func (m *Modbus) gatherRequestsInput(requests []request) error { if err != nil { return err } + nextRequest := time.Now().Add(time.Duration(m.Workarounds.PollPause)) m.Log.Debugf("got input@%v[%v]: %v", request.address, request.length, bytes) // Non-bit value handling @@ -413,6 +467,9 @@ func (m *Modbus) gatherRequestsInput(requests []request) error { request.fields[i].value = field.converter(bytes[offset : offset+length]) m.Log.Debugf(" field %s with offset %d with len %d: %v --> %v", field.name, offset, length, bytes[offset:offset+length], request.fields[i].value) } + + // Some (serial) devices require a pause between requests... + time.Sleep(time.Until(nextRequest)) } return nil } @@ -441,6 +498,11 @@ func (m *Modbus) collectFields(acc telegraf.Accumulator, timestamp time.Time, ta } } +// Implement the logger interface of the modbus client +func (m *Modbus) Printf(format string, v ...interface{}) { + m.Log.Debugf(format, v...) 
+} + // Add this plugin to telegraf func init() { inputs.Add("modbus", func() telegraf.Input { return &Modbus{} }) From c6698747503a3eee36ceca7f1ed10739b03b6ebf Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 20 Oct 2021 14:05:57 -0600 Subject: [PATCH 154/176] docs: update README with info on package repos (#9964) --- README.md | 94 ++++++++++++++++++++++++++++++++----------------------- 1 file changed, 55 insertions(+), 39 deletions(-) diff --git a/README.md b/README.md index c7c846d2b3e75..b76ad45c0d1a3 100644 --- a/README.md +++ b/README.md @@ -3,27 +3,26 @@ ![tiger](TelegrafTiger.png "tiger") -[![Circle CI](https://circleci.com/gh/influxdata/telegraf.svg?style=svg)](https://circleci.com/gh/influxdata/telegraf) [![Docker pulls](https://img.shields.io/docker/pulls/library/telegraf.svg)](https://hub.docker.com/_/telegraf/) [![Total alerts](https://img.shields.io/lgtm/alerts/g/influxdata/telegraf.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/influxdata/telegraf/alerts/) +[![Circle CI](https://circleci.com/gh/influxdata/telegraf.svg?style=svg)](https://circleci.com/gh/influxdata/telegraf) [![Docker pulls](https://img.shields.io/docker/pulls/library/telegraf.svg)](https://hub.docker.com/_/telegraf/) [![Slack Status](https://img.shields.io/badge/slack-join_chat-white.svg?logo=slack&style=social)](https://www.influxdata.com/slack) -Telegraf is an agent for collecting, processing, aggregating, and writing metrics. - -Design goal: -- Have a minimal memory footprint with a plugin system so that developers in the community can easily add support for collecting metrics. - -Telegraf is plugin-driven and has the concept of 4 distinct plugin types: +Telegraf is an agent for collecting, processing, aggregating, and writing metrics. Based on a +plugin system to enable developers in the community to easily add support for additional +metric collection. There are four distinct types of plugins: 1. 
[Input Plugins](/docs/INPUTS.md) collect metrics from the system, services, or 3rd party APIs 2. [Processor Plugins](/docs/PROCESSORS.md) transform, decorate, and/or filter metrics 3. [Aggregator Plugins](/docs/AGGREGATORS.md) create aggregate metrics (e.g. mean, min, max, quantiles, etc.) 4. [Output Plugins](/docs/OUTPUTS.md) write metrics to various destinations -New plugins are designed to be easy to contribute, pull requests are welcomed and we work to incorporate as many pull requests as possible. If none of the internal plugins fit your needs, you could have a look at the -[list of external plugins](EXTERNAL_PLUGINS.md). +New plugins are designed to be easy to contribute, pull requests are welcomed, and we work to +incorporate as many pull requests as possible. Consider looking at the +[list of external plugins](EXTERNAL_PLUGINS.md) as well. ## Minimum Requirements Telegraf shares the same [minimum requirements][] as Go: + - Linux kernel version 2.6.23 or later - Windows 7 or later - FreeBSD 11.2 or later @@ -31,105 +30,122 @@ Telegraf shares the same [minimum requirements][] as Go: [minimum requirements]: https://github.com/golang/go/wiki/MinimumRequirements#minimum-requirements -## Installation: +## Obtaining Telegraf + +View the [changelog](/CHANGELOG.md) for the latest updates and changes by version. + +### Binary Downloads + +Binary downloads are available from the [InfluxData downloads](https://www.influxdata.com/downloads) +page or from each [GitHub Releases](https://github.com/influxdata/telegraf/releases) page. -You can download the binaries directly from the [downloads](https://www.influxdata.com/downloads) page -or from the [releases](https://github.com/influxdata/telegraf/releases) section. +### Package Repository -### Ansible Role: +InfluxData also provides a package repo that contains both DEB and RPM downloads. 
-Ansible role: https://github.com/rossmcdonald/telegraf +For deb-based platforms run the following to add the repo key and setup a new +sources.list entry: + +```shell +curl -s https://repos.influxdata.com/influxdb.key | gpg --dearmor > /etc/apt/trusted.gpg.d/influxdb.gpg +export DISTRIB_ID=$(lsb_release -si); export DISTRIB_CODENAME=$(lsb_release -sc) +echo "deb [signed-by=/etc/apt/trusted.gpg.d/influxdb.gpg] https://repos.influxdata.com/${DISTRIB_ID,,} ${DISTRIB_CODENAME} stable" > /etc/apt/sources.list.d/influxdb.list +``` + +For RPM-based platforms use the following repo file in `/etc/yum.repos.d/`: + +```text +[influxdb] +name = InfluxDB Repository - RHEL $releasever +baseurl = https://repos.influxdata.com/rhel/$releasever/$basearch/stable +enabled = 1 +gpgcheck = 1 +gpgkey = https://repos.influxdata.com/influxdb.key +``` -### From Source: +### Build From Source Telegraf requires Go version 1.17 or newer, the Makefile requires GNU make. 1. [Install Go](https://golang.org/doc/install) >=1.17 (1.17.2 recommended) 2. Clone the Telegraf repository: ``` - cd ~/src git clone https://github.com/influxdata/telegraf.git ``` 3. Run `make` from the source directory ``` - cd ~/src/telegraf + cd telegraf make ``` -### Changelog - -View the [changelog](/CHANGELOG.md) for the latest updates and changes by -version. - ### Nightly Builds [Nightly](/docs/NIGHTLIES.md) builds are available, generated from the master branch. ### 3rd Party Builds -Builds for other platforms or package formats are provided by members of the Telegraf community. These packages are not built, tested or supported by the Telegraf project or InfluxData, we make no guarantees that they will work. Please get in touch with the package author if you need support. +Builds for other platforms or package formats are provided by members of theTelegraf community. +These packages are not built, tested, or supported by the Telegraf project or InfluxData. 
Please +get in touch with the package author if support is needed: -* Windows - * [Chocolatey](https://chocolatey.org/packages/telegraf) by [ripclawffb](https://chocolatey.org/profiles/ripclawffb) - * [Scoop](https://github.com/ScoopInstaller/Main/blob/master/bucket/telegraf.json) -* Linux - * [Snap](https://snapcraft.io/telegraf) by Laurent Sesquès (sajoupa) +- [Ansible Role](https://github.com/rossmcdonald/telegraf) +- [Chocolatey](https://chocolatey.org/packages/telegraf) by [ripclawffb](https://chocolatey.org/profiles/ripclawffb) +- [Scoop](https://github.com/ScoopInstaller/Main/blob/master/bucket/telegraf.json) +- [Snap](https://snapcraft.io/telegraf) by Laurent Sesquès (sajoupa) -## How to use it: +## Getting Started See usage with: -``` +```shell telegraf --help ``` #### Generate a telegraf config file: -``` +```shell telegraf config > telegraf.conf ``` #### Generate config with only cpu input & influxdb output plugins defined: -``` +```shell telegraf --section-filter agent:inputs:outputs --input-filter cpu --output-filter influxdb config ``` #### Run a single telegraf collection, outputting metrics to stdout: -``` +```shell telegraf --config telegraf.conf --test ``` #### Run telegraf with all plugins defined in config file: -``` +```shell telegraf --config telegraf.conf ``` #### Run telegraf, enabling the cpu & memory input, and influxdb output plugins: -``` +```shell telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb ``` ## Documentation -[Latest Release Documentation][release docs]. +[Latest Release Documentation](https://docs.influxdata.com/telegraf) For documentation on the latest development code see the [documentation index](/docs). 
-[release docs]: https://docs.influxdata.com/telegraf -[developer docs]: docs - [Input Plugins](/docs/INPUTS.md) - [Output Plugins](/docs/OUTPUTS.md) - [Processor Plugins](/docs/PROCESSORS.md) - [Aggregator Plugins](/docs/AGGREGATORS.md) - ## Contributing There are many ways to contribute: + - Fix and [report bugs](https://github.com/influxdata/telegraf/issues/new) - [Improve documentation](https://github.com/influxdata/telegraf/issues?q=is%3Aopen+label%3Adocumentation) - [Review code and feature proposals](https://github.com/influxdata/telegraf/pulls) From e685f3be468bb4696438e2d06dcbb900b21e282f Mon Sep 17 00:00:00 2001 From: alespour <42931850+alespour@users.noreply.github.com> Date: Thu, 21 Oct 2021 16:39:24 +0200 Subject: [PATCH 155/176] fix: Graylog plugin TLS support and message format (#9862) --- etc/telegraf.conf | 7 + plugins/outputs/graylog/README.md | 14 +- plugins/outputs/graylog/graylog.go | 85 ++++++-- plugins/outputs/graylog/graylog_test.go | 258 +++++++++++++++--------- 4 files changed, 249 insertions(+), 115 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 2f2dce2f61df6..8b5fe63d19cd4 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -769,6 +769,13 @@ # ## "telegraf" will be used. # ## example: short_message_field = "message" # # short_message_field = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false # # Configurable HTTP health check resource based on metrics diff --git a/plugins/outputs/graylog/README.md b/plugins/outputs/graylog/README.md index 6003122894f6d..96e290b09f5a6 100644 --- a/plugins/outputs/graylog/README.md +++ b/plugins/outputs/graylog/README.md @@ -18,7 +18,19 @@ This plugin writes to a Graylog instance using the "[GELF][]" format. ## "telegraf" will be used. 
## example: short_message_field = "message" # short_message_field = "" + + ## According to GELF payload specification, additional fields names must be prefixed + ## with an underscore. Previous versions did not prefix custom field 'name' with underscore. + ## Set to true for backward compatibility. + # name_field_no_prefix = false + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false ``` Server endpoint may be specified without UDP or TCP scheme (eg. "127.0.0.1:12201"). -In such case, UDP protocol is assumed. +In such case, UDP protocol is assumed. TLS config is ignored for UDP endpoints. diff --git a/plugins/outputs/graylog/graylog.go b/plugins/outputs/graylog/graylog.go index 951273e2e7703..16b744f35ccdc 100644 --- a/plugins/outputs/graylog/graylog.go +++ b/plugins/outputs/graylog/graylog.go @@ -4,6 +4,7 @@ import ( "bytes" "compress/zlib" "crypto/rand" + "crypto/tls" "encoding/binary" ejson "encoding/json" "fmt" @@ -16,11 +17,12 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" + tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" ) const ( - defaultGraylogEndpoint = "127.0.0.1:12201" + defaultEndpoint = "127.0.0.1:12201" defaultConnection = "wan" defaultMaxChunkSizeWan = 1420 defaultMaxChunkSizeLan = 8154 @@ -29,7 +31,7 @@ const ( ) type gelfConfig struct { - GraylogEndpoint string + Endpoint string Connection string MaxChunkSizeWan int MaxChunkSizeLan int @@ -37,6 +39,7 @@ type gelfConfig struct { type gelf interface { io.WriteCloser + Connect() error } type gelfCommon struct { @@ -51,11 +54,12 @@ type gelfUDP struct { type gelfTCP struct { gelfCommon + tlsConfig *tls.Config } -func newGelfWriter(cfg gelfConfig, dialer *net.Dialer) gelf { - if cfg.GraylogEndpoint == "" { - cfg.GraylogEndpoint = 
defaultGraylogEndpoint +func newGelfWriter(cfg gelfConfig, dialer *net.Dialer, tlsConfig *tls.Config) gelf { + if cfg.Endpoint == "" { + cfg.Endpoint = defaultEndpoint } if cfg.Connection == "" { @@ -71,10 +75,10 @@ func newGelfWriter(cfg gelfConfig, dialer *net.Dialer) gelf { } scheme := defaultScheme - parts := strings.SplitN(cfg.GraylogEndpoint, "://", 2) + parts := strings.SplitN(cfg.Endpoint, "://", 2) if len(parts) == 2 { scheme = strings.ToLower(parts[0]) - cfg.GraylogEndpoint = parts[1] + cfg.Endpoint = parts[1] } common := gelfCommon{ gelfConfig: cfg, @@ -84,7 +88,7 @@ func newGelfWriter(cfg gelfConfig, dialer *net.Dialer) gelf { var g gelf switch scheme { case "tcp": - g = &gelfTCP{gelfCommon: common} + g = &gelfTCP{gelfCommon: common, tlsConfig: tlsConfig} default: g = &gelfUDP{gelfCommon: common} } @@ -178,13 +182,21 @@ func (g *gelfUDP) compress(b []byte) bytes.Buffer { return buf } +func (g *gelfUDP) Connect() error { + conn, err := g.dialer.Dial("udp", g.gelfConfig.Endpoint) + if err != nil { + return err + } + g.conn = conn + return nil +} + func (g *gelfUDP) send(b []byte) error { if g.conn == nil { - conn, err := g.dialer.Dial("udp", g.gelfConfig.GraylogEndpoint) + err := g.Connect() if err != nil { return err } - g.conn = conn } _, err := g.conn.Write(b) @@ -216,13 +228,27 @@ func (g *gelfTCP) Close() (err error) { return err } +func (g *gelfTCP) Connect() error { + var err error + var conn net.Conn + if g.tlsConfig == nil { + conn, err = g.dialer.Dial("tcp", g.gelfConfig.Endpoint) + } else { + conn, err = tls.DialWithDialer(g.dialer, "tcp", g.gelfConfig.Endpoint, g.tlsConfig) + } + if err != nil { + return err + } + g.conn = conn + return nil +} + func (g *gelfTCP) send(b []byte) error { if g.conn == nil { - conn, err := g.dialer.Dial("tcp", g.gelfConfig.GraylogEndpoint) + err := g.Connect() if err != nil { return err } - g.conn = conn } _, err := g.conn.Write(b) @@ -243,7 +269,9 @@ func (g *gelfTCP) send(b []byte) error { type Graylog struct { 
Servers []string `toml:"servers"` ShortMessageField string `toml:"short_message_field"` + NameFieldNoPrefix bool `toml:"name_field_noprefix"` Timeout config.Duration `toml:"timeout"` + tlsint.ClientConfig writer io.Writer closers []io.WriteCloser @@ -260,18 +288,39 @@ var sampleConfig = ` ## "telegraf" will be used. ## example: short_message_field = "message" # short_message_field = "" + + ## According to GELF payload specification, additional fields names must be prefixed + ## with an underscore. Previous versions did not prefix custom field 'name' with underscore. + ## Set to true for backward compatibility. + # name_field_no_prefix = false + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false ` func (g *Graylog) Connect() error { - writers := []io.Writer{} - dialer := net.Dialer{Timeout: time.Duration(g.Timeout)} + var writers []io.Writer + dialer := &net.Dialer{Timeout: time.Duration(g.Timeout)} if len(g.Servers) == 0 { g.Servers = append(g.Servers, "localhost:12201") } + tlsCfg, err := g.ClientConfig.TLSConfig() + if err != nil { + return err + } + for _, server := range g.Servers { - w := newGelfWriter(gelfConfig{GraylogEndpoint: server}, &dialer) + w := newGelfWriter(gelfConfig{Endpoint: server}, dialer, tlsCfg) + err := w.Connect() + if err != nil { + return fmt.Errorf("failed to connect to server [%s]: %v", server, err) + } writers = append(writers, w) g.closers = append(g.closers, w) } @@ -319,7 +368,11 @@ func (g *Graylog) serialize(metric telegraf.Metric) ([]string, error) { m["version"] = "1.1" m["timestamp"] = float64(metric.Time().UnixNano()) / 1_000_000_000 m["short_message"] = "telegraf" - m["name"] = metric.Name() + if g.NameFieldNoPrefix { + m["name"] = metric.Name() + } else { + m["_name"] = metric.Name() + } if host, ok := metric.GetTag("host"); ok { m["host"] = host diff --git 
a/plugins/outputs/graylog/graylog_test.go b/plugins/outputs/graylog/graylog_test.go index fcf61ae77d51e..3932c736c2aff 100644 --- a/plugins/outputs/graylog/graylog_test.go +++ b/plugins/outputs/graylog/graylog_test.go @@ -3,125 +3,174 @@ package graylog import ( "bytes" "compress/zlib" + "crypto/tls" "encoding/json" "io" "net" "sync" "testing" + "time" + tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestWriteDefault(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - - scenarioUDP(t, "127.0.0.1:12201") -} - func TestWriteUDP(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") + tests := []struct { + name string + instance Graylog + }{ + { + name: "default without scheme", + instance: Graylog{ + Servers: []string{"127.0.0.1:12201"}, + }, + }, + { + name: "UDP", + instance: Graylog{ + Servers: []string{"udp://127.0.0.1:12201"}, + }, + }, + { + name: "UDP non-standard name field", + instance: Graylog{ + Servers: []string{"udp://127.0.0.1:12201"}, + NameFieldNoPrefix: true, + }, + }, } - scenarioUDP(t, "udp://127.0.0.1:12201") -} - -func TestWriteTCP(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var wg sync.WaitGroup + var wg2 sync.WaitGroup + wg.Add(1) + wg2.Add(1) + go UDPServer(t, &wg, &wg2, &tt.instance) + wg2.Wait() + + i := tt.instance + err := i.Connect() + require.NoError(t, err) + defer i.Close() + defer wg.Wait() - scenarioTCP(t, "tcp://127.0.0.1:12201") -} + metrics := testutil.MockMetrics() -func scenarioUDP(t *testing.T, server string) { - var wg sync.WaitGroup - var wg2 sync.WaitGroup - wg.Add(1) - wg2.Add(1) - go UDPServer(t, &wg, &wg2) - wg2.Wait() + // UDP scenario: + // 4 messages are send - i := Graylog{ - Servers: []string{server}, + err = 
i.Write(metrics) + require.NoError(t, err) + err = i.Write(metrics) + require.NoError(t, err) + err = i.Write(metrics) + require.NoError(t, err) + err = i.Write(metrics) + require.NoError(t, err) + }) } - err := i.Connect() - require.NoError(t, err) - - metrics := testutil.MockMetrics() - - // UDP scenario: - // 4 messages are send +} - err = i.Write(metrics) - require.NoError(t, err) - err = i.Write(metrics) - require.NoError(t, err) - err = i.Write(metrics) - require.NoError(t, err) - err = i.Write(metrics) +func TestWriteTCP(t *testing.T) { + pki := testutil.NewPKI("../../../testutil/pki") + tlsClientConfig := pki.TLSClientConfig() + tlsServerConfig, err := pki.TLSServerConfig().TLSConfig() require.NoError(t, err) - wg.Wait() - i.Close() -} - -func scenarioTCP(t *testing.T, server string) { - var wg sync.WaitGroup - var wg2 sync.WaitGroup - var wg3 sync.WaitGroup - wg.Add(1) - wg2.Add(1) - wg3.Add(1) - go TCPServer(t, &wg, &wg2, &wg3) - wg2.Wait() - - i := Graylog{ - Servers: []string{server}, + tests := []struct { + name string + instance Graylog + tlsServerConfig *tls.Config + }{ + { + name: "TCP", + instance: Graylog{ + Servers: []string{"tcp://127.0.0.1:12201"}, + }, + }, + { + name: "TLS", + instance: Graylog{ + Servers: []string{"tcp://127.0.0.1:12201"}, + ClientConfig: tlsint.ClientConfig{ + ServerName: "localhost", + TLSCA: tlsClientConfig.TLSCA, + TLSKey: tlsClientConfig.TLSKey, + TLSCert: tlsClientConfig.TLSCert, + }, + }, + tlsServerConfig: tlsServerConfig, + }, + { + name: "TLS no validation", + instance: Graylog{ + Servers: []string{"tcp://127.0.0.1:12201"}, + ClientConfig: tlsint.ClientConfig{ + InsecureSkipVerify: true, + ServerName: "localhost", + TLSKey: tlsClientConfig.TLSKey, + TLSCert: tlsClientConfig.TLSCert, + }, + }, + tlsServerConfig: tlsServerConfig, + }, } - err := i.Connect() - require.NoError(t, err) - metrics := testutil.MockMetrics() + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var wg sync.WaitGroup + var wg2 
sync.WaitGroup + var wg3 sync.WaitGroup + wg.Add(1) + wg2.Add(1) + wg3.Add(1) + go TCPServer(t, &wg, &wg2, &wg3, tt.tlsServerConfig) + wg2.Wait() + + i := tt.instance + err = i.Connect() + require.NoError(t, err) + defer i.Close() + defer wg.Wait() - // TCP scenario: - // 4 messages are send - // -> connection gets broken after the 2nd message (server closes connection) - // -> the 3rd write ends with error - // -> in the 4th write connection is restored and write is successful + metrics := testutil.MockMetrics() - err = i.Write(metrics) - require.NoError(t, err) - err = i.Write(metrics) - require.NoError(t, err) - wg3.Wait() - err = i.Write(metrics) - require.Error(t, err) - err = i.Write(metrics) - require.NoError(t, err) + // TCP scenario: + // 4 messages are send + // -> connection gets forcefully broken after the 2nd message (server closes connection) + // -> the 3rd write fails with error + // -> during the 4th write connection is restored and write is successful - wg.Wait() - i.Close() + err = i.Write(metrics) + require.NoError(t, err) + err = i.Write(metrics) + require.NoError(t, err) + wg3.Wait() + err = i.Write(metrics) + require.Error(t, err) + err = i.Write(metrics) + require.NoError(t, err) + }) + } } type GelfObject map[string]interface{} -func UDPServer(t *testing.T, wg *sync.WaitGroup, wg2 *sync.WaitGroup) { +func UDPServer(t *testing.T, wg *sync.WaitGroup, wg2 *sync.WaitGroup, config *Graylog) { serverAddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:12201") require.NoError(t, err) udpServer, err := net.ListenUDP("udp", serverAddr) require.NoError(t, err) defer udpServer.Close() defer wg.Done() - - bufR := make([]byte, 1024) wg2.Done() recv := func() { + bufR := make([]byte, 1024) n, _, err := udpServer.ReadFromUDP(bufR) require.NoError(t, err) @@ -135,6 +184,13 @@ func UDPServer(t *testing.T, wg *sync.WaitGroup, wg2 *sync.WaitGroup) { var obj GelfObject _ = json.Unmarshal(bufW.Bytes(), &obj) require.NoError(t, err) + assert.Equal(t, 
obj["short_message"], "telegraf") + if config.NameFieldNoPrefix { + assert.Equal(t, obj["name"], "test1") + } else { + assert.Equal(t, obj["_name"], "test1") + } + assert.Equal(t, obj["_tag1"], "value1") assert.Equal(t, obj["_value"], float64(1)) } @@ -146,29 +202,29 @@ func UDPServer(t *testing.T, wg *sync.WaitGroup, wg2 *sync.WaitGroup) { recv() } -func TCPServer(t *testing.T, wg *sync.WaitGroup, wg2 *sync.WaitGroup, wg3 *sync.WaitGroup) { - serverAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:12201") - require.NoError(t, err) - tcpServer, err := net.ListenTCP("tcp", serverAddr) +func TCPServer(t *testing.T, wg *sync.WaitGroup, wg2 *sync.WaitGroup, wg3 *sync.WaitGroup, tlsConfig *tls.Config) { + tcpServer, err := net.Listen("tcp", "127.0.0.1:12201") require.NoError(t, err) defer tcpServer.Close() defer wg.Done() - - bufR := make([]byte, 1) - bufW := bytes.NewBuffer(nil) wg2.Done() - accept := func() *net.TCPConn { - conn, err := tcpServer.AcceptTCP() + accept := func() net.Conn { + conn, err := tcpServer.Accept() require.NoError(t, err) - _ = conn.SetLinger(0) + if tcpConn, ok := conn.(*net.TCPConn); ok { + _ = tcpConn.SetLinger(0) + } + _ = conn.SetDeadline(time.Now().Add(15 * time.Second)) + if tlsConfig != nil { + conn = tls.Server(conn, tlsConfig) + } return conn } - conn := accept() - defer conn.Close() - recv := func() { - bufW.Reset() + recv := func(conn net.Conn) { + bufR := make([]byte, 1) + bufW := bytes.NewBuffer(nil) for { n, err := conn.Read(bufR) require.NoError(t, err) @@ -183,16 +239,22 @@ func TCPServer(t *testing.T, wg *sync.WaitGroup, wg2 *sync.WaitGroup, wg3 *sync. 
var obj GelfObject err = json.Unmarshal(bufW.Bytes(), &obj) require.NoError(t, err) + assert.Equal(t, obj["short_message"], "telegraf") + assert.Equal(t, obj["_name"], "test1") + assert.Equal(t, obj["_tag1"], "value1") assert.Equal(t, obj["_value"], float64(1)) } - // in TCP scenario only 3 messages are received (1st, 2dn and 4th) due to connection break after the 2nd + conn := accept() + defer conn.Close() - recv() - recv() + // in TCP scenario only 3 messages are received, the 3rd is lost due to simulated connection break after the 2nd + + recv(conn) + recv(conn) _ = conn.Close() wg3.Done() conn = accept() defer conn.Close() - recv() + recv(conn) } From 8f35d74c5e545b581f5591f379fd4658df3dc4f4 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Thu, 21 Oct 2021 09:40:03 -0500 Subject: [PATCH 156/176] chore: update go to 1.17.2 (#9873) --- .circleci/config.yml | 2 +- Makefile | 4 ++-- scripts/alpine.docker | 2 +- scripts/buster.docker | 2 +- scripts/ci-1.17.docker | 2 +- scripts/installgo_mac.sh | 6 +++--- scripts/installgo_windows.sh | 2 +- 7 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 3cc2ac7f9b0ec..27f7e75e7a225 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -7,7 +7,7 @@ executors: go-1_17: working_directory: '/go/src/github.com/influxdata/telegraf' docker: - - image: 'quay.io/influxdb/telegraf-ci:1.17.0' + - image: 'quay.io/influxdb/telegraf-ci:1.17.2' environment: GOFLAGS: -p=8 mac: diff --git a/Makefile b/Makefile index 0c59319de710d..09a6babaee73f 100644 --- a/Makefile +++ b/Makefile @@ -201,8 +201,8 @@ plugin-%: .PHONY: ci-1.17 ci-1.17: - docker build -t quay.io/influxdb/telegraf-ci:1.17.0 - < scripts/ci-1.17.docker - docker push quay.io/influxdb/telegraf-ci:1.17.0 + docker build -t quay.io/influxdb/telegraf-ci:1.17.2 - < scripts/ci-1.17.docker + docker push quay.io/influxdb/telegraf-ci:1.17.2 .PHONY: install install: 
$(buildbin) diff --git a/scripts/alpine.docker b/scripts/alpine.docker index 8c2418083ef8c..84cfcac2268a0 100644 --- a/scripts/alpine.docker +++ b/scripts/alpine.docker @@ -1,4 +1,4 @@ -FROM golang:1.17.0 as builder +FROM golang:1.17.2 as builder WORKDIR /go/src/github.com/influxdata/telegraf COPY . /go/src/github.com/influxdata/telegraf diff --git a/scripts/buster.docker b/scripts/buster.docker index fbb18eee24f17..17b0cb581cc92 100644 --- a/scripts/buster.docker +++ b/scripts/buster.docker @@ -1,4 +1,4 @@ -FROM golang:1.17.0-buster as builder +FROM golang:1.17.2-buster as builder WORKDIR /go/src/github.com/influxdata/telegraf COPY . /go/src/github.com/influxdata/telegraf diff --git a/scripts/ci-1.17.docker b/scripts/ci-1.17.docker index 574ab7be7a896..a69a0d7eddbe3 100644 --- a/scripts/ci-1.17.docker +++ b/scripts/ci-1.17.docker @@ -1,4 +1,4 @@ -FROM golang:1.17.0 +FROM golang:1.17.2 RUN chmod -R 755 "$GOPATH" diff --git a/scripts/installgo_mac.sh b/scripts/installgo_mac.sh index b839358136d98..4e7fb756161dc 100644 --- a/scripts/installgo_mac.sh +++ b/scripts/installgo_mac.sh @@ -3,8 +3,8 @@ set -eux GO_ARCH="darwin-amd64" -GO_VERSION="1.17" -GO_VERSION_SHA="355bd544ce08d7d484d9d7de05a71b5c6f5bc10aa4b316688c2192aeb3dacfd1" # from https://golang.org/dl +GO_VERSION="1.17.2" +GO_VERSION_SHA="7914497a302a132a465d33f5ee044ce05568bacdb390ab805cb75a3435a23f94" # from https://golang.org/dl # This path is cachable. (Saving in /usr/local/ would cause issues restoring the cache.) 
path="/usr/local/Cellar" @@ -18,7 +18,7 @@ setup_go () { echo "Checksum failed" >&2 exit 1 fi - + sudo rm -rf ${path}/go sudo tar -C $path -xzf go${GO_VERSION}.${GO_ARCH}.tar.gz ln -sf ${path}/go/bin/go /usr/local/bin/go diff --git a/scripts/installgo_windows.sh b/scripts/installgo_windows.sh index d035447570c8a..bd5dcca3dbc14 100644 --- a/scripts/installgo_windows.sh +++ b/scripts/installgo_windows.sh @@ -2,7 +2,7 @@ set -eux -GO_VERSION="1.17" +GO_VERSION="1.17.2" setup_go () { choco upgrade golang --version=${GO_VERSION} From aa2f1b150ecc9f0559d87ce60042db36bd82005d Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Thu, 21 Oct 2021 17:51:23 +0200 Subject: [PATCH 157/176] fix: Check return code of zfs command for FreeBSD. (#9956) --- plugins/inputs/zfs/zfs_freebsd.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/zfs/zfs_freebsd.go b/plugins/inputs/zfs/zfs_freebsd.go index 24f6a50997612..e493e3fc9a0bb 100644 --- a/plugins/inputs/zfs/zfs_freebsd.go +++ b/plugins/inputs/zfs/zfs_freebsd.go @@ -174,8 +174,11 @@ func run(command string, args ...string) ([]string, error) { stdout := strings.TrimSpace(outbuf.String()) stderr := strings.TrimSpace(errbuf.String()) - if _, ok := err.(*exec.ExitError); ok { - return nil, fmt.Errorf("%s error: %s", command, stderr) + if err != nil { + if _, ok := err.(*exec.ExitError); ok { + return nil, fmt.Errorf("%s error: %s", command, stderr) + } + return nil, fmt.Errorf("%s error: %s", command, err) } return strings.Split(stdout, "\n"), nil } From e50b415ffd2497ef2b2d27e54b15093f96918052 Mon Sep 17 00:00:00 2001 From: "telegraf-tiger[bot]" <76974415+telegraf-tiger[bot]@users.noreply.github.com> Date: Thu, 21 Oct 2021 12:54:34 -0600 Subject: [PATCH 158/176] feat: update etc/telegraf.conf and etc/telegraf_windows.conf (#9876) --- etc/telegraf.conf | 16 +++++++++++++++- etc/telegraf_windows.conf | 12 +++++++++++- 2 files changed, 26 insertions(+), 2 
deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 8b5fe63d19cd4..2b09df6623d58 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -3881,9 +3881,13 @@ # ## as well as ** to match recursive files and directories. # files = ["/tmp/metrics.out"] # +# # ## Name a tag containing the name of the file the data was parsed from. Leave empty -# ## to disable. +# ## to disable. Cautious when file name variation is high, this can increase the cardinality +# ## significantly. Read more about cardinality here: +# ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality # # file_tag = "" +# # # # ## Character encoding to use when interpreting the file contents. Invalid # ## characters are replaced using the unicode replacement character. When set @@ -7098,6 +7102,12 @@ # ## Lowering this value will result in *slightly* less memory use, with a potential sacrifice in speed efficiency, if absolutely necessary. # # file_queue_size = 100000 # # +# ## Name a tag containing the name of the file the data was parsed from. Leave empty +# ## to disable. Cautious when file name variation is high, this can increase the cardinality +# ## significantly. Read more about cardinality here: +# ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality +# # file_tag = "" +# # # ## The dataformat to be read from the files. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -7500,6 +7510,10 @@ # ## Mandatory if cores aren't set and forbidden if cores are specified. # ## e.g. ["qemu", "pmd"] # # processes = ["process"] +# +# ## Specify if the pqos process should be called with sudo. +# ## Mandatory if the telegraf process does not run as root. 
+# # use_sudo = false # # Read JTI OpenConfig Telemetry from listed sensors diff --git a/etc/telegraf_windows.conf b/etc/telegraf_windows.conf index ee67219c3c3f5..01091328e31ab 100644 --- a/etc/telegraf_windows.conf +++ b/etc/telegraf_windows.conf @@ -3815,9 +3815,13 @@ # ## as well as ** to match recursive files and directories. # files = ["/tmp/metrics.out"] # +# # ## Name a tag containing the name of the file the data was parsed from. Leave empty -# ## to disable. +# ## to disable. Cautious when file name variation is high, this can increase the cardinality +# ## significantly. Read more about cardinality here: +# ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality # # file_tag = "" +# # # # ## Character encoding to use when interpreting the file contents. Invalid # ## characters are replaced using the unicode replacement character. When set @@ -7080,6 +7084,12 @@ # ## Lowering this value will result in *slightly* less memory use, with a potential sacrifice in speed efficiency, if absolutely necessary. # # file_queue_size = 100000 # # +# ## Name a tag containing the name of the file the data was parsed from. Leave empty +# ## to disable. Cautious when file name variation is high, this can increase the cardinality +# ## significantly. Read more about cardinality here: +# ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality +# # file_tag = "" +# # # ## The dataformat to be read from the files. 
# ## Each data format has its own unique set of configuration options, read # ## more about them here: From 112ef7fc26a14318554f9173957c9ede32083088 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Thu, 21 Oct 2021 20:55:23 +0200 Subject: [PATCH 159/176] fix: starlark pop operation for non-existing keys (#9954) --- plugins/processors/starlark/field_dict.go | 1 + plugins/processors/starlark/starlark_test.go | 90 ++++++++++++++++++++ plugins/processors/starlark/tag_dict.go | 1 + 3 files changed, 92 insertions(+) diff --git a/plugins/processors/starlark/field_dict.go b/plugins/processors/starlark/field_dict.go index af32da185ba11..4a332b8268d9d 100644 --- a/plugins/processors/starlark/field_dict.go +++ b/plugins/processors/starlark/field_dict.go @@ -175,6 +175,7 @@ func (d FieldDict) Delete(k starlark.Value) (v starlark.Value, found bool, err e sv, err := asStarlarkValue(value) return sv, ok, err } + return starlark.None, false, nil } return starlark.None, false, errors.New("key must be of type 'str'") diff --git a/plugins/processors/starlark/starlark_test.go b/plugins/processors/starlark/starlark_test.go index 9eed069948bb0..6ad169bbf3f87 100644 --- a/plugins/processors/starlark/starlark_test.go +++ b/plugins/processors/starlark/starlark_test.go @@ -705,6 +705,49 @@ def apply(metric): ), }, }, + { + name: "pop tag (default)", + source: ` +def apply(metric): + metric.tags['host2'] = metric.tags.pop('url', 'foo.org') + return metric +`, + input: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{ + "host": "example.org", + }, + map[string]interface{}{"time_idle": 0}, + time.Unix(0, 0), + ), + testutil.MustMetric("cpu", + map[string]string{ + "host": "example.org", + "url": "bar.org", + }, + map[string]interface{}{"time_idle": 0}, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{ + "host": "example.org", + "host2": "foo.org", + }, + 
map[string]interface{}{"time_idle": 0}, + time.Unix(0, 0), + ), + testutil.MustMetric("cpu", + map[string]string{ + "host": "example.org", + "host2": "bar.org", + }, + map[string]interface{}{"time_idle": 0}, + time.Unix(0, 0), + ), + }, + }, { name: "popitem tags", source: ` @@ -1773,6 +1816,53 @@ def apply(metric): ), }, }, + { + name: "pop field (default)", + source: ` +def apply(metric): + metric.fields['idle_count'] = metric.fields.pop('count', 10) + return metric +`, + input: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 0, + "time_guest": 0, + }, + time.Unix(0, 0), + ), + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 0, + "time_guest": 0, + "count": 0, + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 0, + "time_guest": 0, + "idle_count": 10, + }, + time.Unix(0, 0), + ), + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 0, + "time_guest": 0, + "idle_count": 0, + }, + time.Unix(0, 0), + ), + }, + }, { name: "popitem field", source: ` diff --git a/plugins/processors/starlark/tag_dict.go b/plugins/processors/starlark/tag_dict.go index b17a6e2f0b6a3..7dbb8c12d0ed6 100644 --- a/plugins/processors/starlark/tag_dict.go +++ b/plugins/processors/starlark/tag_dict.go @@ -162,6 +162,7 @@ func (d TagDict) Delete(k starlark.Value) (v starlark.Value, found bool, err err v := starlark.String(value) return v, ok, err } + return starlark.None, false, nil } return starlark.None, false, errors.New("key must be of type 'str'") From a4d8a4b84fc58d0cd1dda85f9e102d3bf05b7738 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Thu, 21 Oct 2021 14:22:56 -0500 Subject: [PATCH 160/176] chore: lint ignore fmt.Printf unhandled error (#9967) --- .golangci.yml | 1 + 1 file changed, 1 
insertion(+) diff --git a/.golangci.yml b/.golangci.yml index 47bfdae26e95f..470fc116bfb37 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -75,6 +75,7 @@ linters-settings: - name: unconditional-recursion - name: unexported-naming - name: unhandled-error + arguments: ["fmt.Printf", "fmt.Println"] - name: unnecessary-stmt - name: unreachable-code # - name: unused-parameter From 06f2a37b4a13f5666c8460cecf7e9a939b98ec8f Mon Sep 17 00:00:00 2001 From: Alexander Krantz Date: Thu, 21 Oct 2021 14:32:10 -0700 Subject: [PATCH 161/176] chore: Update to AWS SDK v2 (#9647) --- config/aws/credentials.go | 70 ++++--- docs/LICENSE_OF_DEPENDENCIES.md | 14 +- go.mod | 24 ++- go.sum | 105 ++++++++++- plugins/inputs/cloudwatch/cloudwatch.go | 162 ++++++++-------- plugins/inputs/cloudwatch/cloudwatch_test.go | 100 +++++----- .../kinesis_consumer/kinesis_consumer.go | 56 +++--- .../kinesis_consumer/kinesis_consumer_test.go | 45 ++++- plugins/outputs/cloudwatch/cloudwatch.go | 55 +++--- plugins/outputs/cloudwatch/cloudwatch_test.go | 29 ++- .../cloudwatch_logs/cloudwatch_logs.go | 42 ++--- .../cloudwatch_logs/cloudwatch_logs_test.go | 24 +-- plugins/outputs/kinesis/kinesis.go | 30 +-- plugins/outputs/kinesis/kinesis_test.go | 52 +++--- plugins/outputs/timestream/timestream.go | 175 +++++++++--------- .../timestream/timestream_internal_test.go | 42 ++--- plugins/outputs/timestream/timestream_test.go | 158 ++++++++-------- 17 files changed, 663 insertions(+), 520 deletions(-) diff --git a/config/aws/credentials.go b/config/aws/credentials.go index 7b75917393590..358080ab3ba69 100644 --- a/config/aws/credentials.go +++ b/config/aws/credentials.go @@ -1,11 +1,12 @@ package aws import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/stscreds" - "github.com/aws/aws-sdk-go/aws/session" + "context" + awsV2 "github.com/aws/aws-sdk-go-v2/aws" + configV2 
"github.com/aws/aws-sdk-go-v2/config" + credentialsV2 "github.com/aws/aws-sdk-go-v2/credentials" + stscredsV2 "github.com/aws/aws-sdk-go-v2/credentials/stscreds" + "github.com/aws/aws-sdk-go-v2/service/sts" ) type CredentialConfig struct { @@ -21,45 +22,66 @@ type CredentialConfig struct { WebIdentityTokenFile string `toml:"web_identity_token_file"` } -func (c *CredentialConfig) Credentials() (client.ConfigProvider, error) { +func (c *CredentialConfig) Credentials() (awsV2.Config, error) { if c.RoleARN != "" { return c.assumeCredentials() } - return c.rootCredentials() } -func (c *CredentialConfig) rootCredentials() (client.ConfigProvider, error) { - config := &aws.Config{ - Region: aws.String(c.Region), +func (c *CredentialConfig) rootCredentials() (awsV2.Config, error) { + options := []func(*configV2.LoadOptions) error{ + configV2.WithRegion(c.Region), } + if c.EndpointURL != "" { - config.Endpoint = &c.EndpointURL + resolver := awsV2.EndpointResolverFunc(func(service, region string) (awsV2.Endpoint, error) { + return awsV2.Endpoint{ + URL: c.EndpointURL, + HostnameImmutable: true, + Source: awsV2.EndpointSourceCustom, + }, nil + }) + options = append(options, configV2.WithEndpointResolver(resolver)) + } + + if c.Profile != "" { + options = append(options, configV2.WithSharedConfigProfile(c.Profile)) } + if c.Filename != "" { + options = append(options, configV2.WithSharedCredentialsFiles([]string{c.Filename})) + } + if c.AccessKey != "" || c.SecretKey != "" { - config.Credentials = credentials.NewStaticCredentials(c.AccessKey, c.SecretKey, c.Token) - } else if c.Profile != "" || c.Filename != "" { - config.Credentials = credentials.NewSharedCredentials(c.Filename, c.Profile) + provider := credentialsV2.NewStaticCredentialsProvider(c.AccessKey, c.SecretKey, c.Token) + options = append(options, configV2.WithCredentialsProvider(provider)) } - return session.NewSession(config) + return configV2.LoadDefaultConfig(context.Background(), options...) 
} -func (c *CredentialConfig) assumeCredentials() (client.ConfigProvider, error) { +func (c *CredentialConfig) assumeCredentials() (awsV2.Config, error) { rootCredentials, err := c.rootCredentials() if err != nil { - return nil, err - } - config := &aws.Config{ - Region: aws.String(c.Region), - Endpoint: &c.EndpointURL, + return awsV2.Config{}, err } + var provider awsV2.CredentialsProvider + stsService := sts.NewFromConfig(rootCredentials) if c.WebIdentityTokenFile != "" { - config.Credentials = stscreds.NewWebIdentityCredentials(rootCredentials, c.RoleARN, c.RoleSessionName, c.WebIdentityTokenFile) + provider = stscredsV2.NewWebIdentityRoleProvider(stsService, c.RoleARN, stscredsV2.IdentityTokenFile(c.WebIdentityTokenFile), func(opts *stscredsV2.WebIdentityRoleOptions) { + if c.RoleSessionName != "" { + opts.RoleSessionName = c.RoleSessionName + } + }) } else { - config.Credentials = stscreds.NewCredentials(rootCredentials, c.RoleARN) + provider = stscredsV2.NewAssumeRoleProvider(stsService, c.RoleARN, func(opts *stscredsV2.AssumeRoleOptions) { + if c.RoleSessionName != "" { + opts.RoleSessionName = c.RoleSessionName + } + }) } - return session.NewSession(config) + rootCredentials.Credentials = awsV2.NewCredentialsCache(provider) + return rootCredentials, nil } diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index c52b110b28fd8..ba1ee5147d99e 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -36,17 +36,27 @@ following works: - github.com/aws/aws-sdk-go-v2 [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/config [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/config/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/credentials [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/credentials/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue [Apache License 
2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/feature/dynamodb/attributevalue/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/feature/ec2/imds [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/feature/ec2/imds/LICENSE.txt) -- github.com/aws/aws-sdk-go-v2/feature/s3/manager [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/LICENSE.txt) -- github.com/aws/aws-sdk-go-v2/internal/ini [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/ec2/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/feature/s3/manager [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/feature/s3/manager/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/internal/configsources [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/internal/configsources/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/internal/ini [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/internal/ini/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/cloudwatch [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/cloudwatch/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/cloudwatchlogs/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/dynamodb [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/dynamodb/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/dynamodbstreams [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/dynamodbstreams/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/service/ec2 [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/ec2/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/internal/accept-encoding/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery [Apache License 
2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/internal/endpoint-discovery/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/internal/presigned-url/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/service/internal/s3shared [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/internal/s3shared/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/kinesis [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/kinesis/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/service/s3 [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/s3/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/service/sso [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/ec2/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/service/sts [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/sts/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/timestreamwrite [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/timestreamwrite/LICENSE.txt) - github.com/aws/smithy-go [Apache License 2.0](https://github.com/aws/smithy-go/blob/main/LICENSE) +- github.com/awslabs/kinesis-aggregation/go [Apache License 2.0](https://github.com/awslabs/kinesis-aggregation/blob/master/LICENSE.txt) - github.com/benbjohnson/clock [MIT License](https://github.com/benbjohnson/clock/blob/master/LICENSE) - github.com/beorn7/perks [MIT License](https://github.com/beorn7/perks/blob/master/LICENSE) - github.com/bmatcuk/doublestar [MIT License](https://github.com/bmatcuk/doublestar/blob/master/LICENSE) diff --git a/go.mod b/go.mod index 95992243e964e..09cae700f0c17 100644 --- a/go.mod +++ b/go.mod @@ -47,19 +47,26 @@ require ( github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 // indirect github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 github.com/armon/go-metrics v0.3.3 
// indirect - github.com/aws/aws-sdk-go v1.38.69 github.com/aws/aws-sdk-go-v2 v1.9.1 github.com/aws/aws-sdk-go-v2/config v1.8.2 - github.com/aws/aws-sdk-go-v2/credentials v1.4.2 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.4.2 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.1 github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.3 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.4 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.2.3 // indirect + github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.7.0 + github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.5.2 + github.com/aws/aws-sdk-go-v2/service/dynamodb v1.5.0 github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.1.0 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.1 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.1 // indirect + github.com/aws/aws-sdk-go-v2/service/kinesis v1.6.0 github.com/aws/aws-sdk-go-v2/service/s3 v1.16.0 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.4.1 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.7.1 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.7.1 + github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.3.2 github.com/aws/smithy-go v1.8.0 github.com/benbjohnson/clock v1.1.0 github.com/beorn7/perks v1.0.1 // indirect @@ -129,8 +136,8 @@ require ( github.com/grid-x/serial v0.0.0-20191104121038-e24bc9bf6f08 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect - github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 - github.com/hashicorp/consul/api v1.11.0 + github.com/harlow/kinesis-consumer v0.3.6-0.20210911031324-5a873d6e9fec + github.com/hashicorp/consul/api v1.9.1 
github.com/hashicorp/go-cleanhttp v0.5.1 // indirect github.com/hashicorp/go-hclog v0.16.2 // indirect github.com/hashicorp/go-immutable-radix v1.2.0 // indirect @@ -264,7 +271,7 @@ require ( github.com/xdg/scram v1.0.3 github.com/xdg/stringprep v1.0.3 // indirect github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect - github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 // indirect + github.com/yuin/gopher-lua v0.0.0-20200603152657-dc2b0ca8b37e // indirect go.etcd.io/etcd/api/v3 v3.5.0 // indirect go.mongodb.org/mongo-driver v1.5.3 go.opencensus.io v0.23.0 // indirect @@ -327,7 +334,10 @@ require ( ) require ( - github.com/aws/aws-sdk-go-v2/internal/ini v1.2.3 // indirect + github.com/aws/aws-sdk-go v1.38.3 // indirect + github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0 // indirect + github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.4.0 // indirect + github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f // indirect github.com/cenkalti/backoff/v4 v4.1.1 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect diff --git a/go.sum b/go.sum index db4f0ad82eaed..df4e48542c420 100644 --- a/go.sum +++ b/go.sum @@ -153,7 +153,9 @@ github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/sketches-go v0.0.0-20190923095040-43f19ad77ff7/go.mod h1:Q5DbzQ+3AkgGwymQO7aZFNP7ns2lZKGtvRBzRXfdi60= github.com/DataDog/zstd 
v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/DataDog/zstd v1.4.4/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= @@ -238,6 +240,8 @@ github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15 h1:AUNCr9CiJuwrRY github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= +github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= +github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk= github.com/aliyun/alibaba-cloud-sdk-go v1.61.1004 h1:YtaYjXmemIMyySUbs0VGFPqsLpsNHf4TW/L6yqpJQ9s= github.com/aliyun/alibaba-cloud-sdk-go v1.61.1004/go.mod h1:pUKYbK5JQ+1Dfxk80P0qxGqe5dkxDoabbZS7zOcouyA= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 h1:FXrPTd8Rdlc94dKccl7KPmdmIbVh/OjelJ8/vgMRzcQ= @@ -265,6 +269,10 @@ github.com/apache/thrift v0.14.1/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb github.com/apache/thrift v0.14.2/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.15.0 h1:aGvdaR0v1t9XLgjtBYwxcBvBOTMqClzwE26CHOgjW1Y= github.com/apache/thrift v0.15.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= +github.com/apex/log v1.6.0/go.mod h1:x7s+P9VtvFBXge9Vbn+8TrqKmuzmD35TTkeBHul8UtY= +github.com/apex/logs v1.0.0/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo= +github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE= +github.com/aphistic/sweet v0.2.0/go.mod 
h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys= github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 h1:Bmjk+DjIi3tTAU0wxGaFbfjGUqlxxSXARq9A96Kgoos= github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3/go.mod h1:KASm+qXFKs/xjSoWn30NrWBBvdTTQq+UjkhjEJHfSFA= github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 h1:FD4/ikKOFxwP8muWDypbmBWc634+YcAs3eBrYAmRdZY= @@ -287,56 +295,111 @@ github.com/ashanbrown/forbidigo v1.1.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBF github.com/ashanbrown/makezero v0.0.0-20201205152432-7b7cdbb3025a/go.mod h1:oG9Dnez7/ESBqc4EdrdNlryeo7d0KcW1ftXHm7nU/UU= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/aws/aws-sdk-go v1.19.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= +github.com/aws/aws-sdk-go v1.38.3 h1:QCL/le04oAz2jELMRSuJVjGT7H+4hhoQc66eMPCfU/k= github.com/aws/aws-sdk-go v1.38.3/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.38.69 h1:V489lmrdkIQSfF6OAGZZ1Cavcm7eczCm2JcGvX+yHRg= -github.com/aws/aws-sdk-go v1.38.69/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.1.0/go.mod h1:smfAbmpW+tcRVuNUjo3MOArSZmW72t62rkCzc2i0TWM= github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= +github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= +github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= 
+github.com/aws/aws-sdk-go-v2 v1.8.1/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= +github.com/aws/aws-sdk-go-v2 v1.9.0/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2 v1.9.1 h1:ZbovGV/qo40nrOJ4q8G33AGICzaPI45FHQWJ9650pF4= github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU= +github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU= +github.com/aws/aws-sdk-go-v2/config v1.6.1/go.mod h1:t/y3UPu0XEDy0cEw6mvygaBQaPzWiYAxfP2SzgtvclA= github.com/aws/aws-sdk-go-v2/config v1.8.2 h1:Dqy4ySXFmulRmZhfynm/5CD4Y6aXiTVhDtXLIuUe/r0= github.com/aws/aws-sdk-go-v2/config v1.8.2/go.mod h1:r0bkX9NyuCuf28qVcsEMtpAQibT7gA1Q0gzkjvgJdLU= github.com/aws/aws-sdk-go-v2/credentials v1.3.2/go.mod h1:PACKuTJdt6AlXvEq8rFI4eDmoqDFC5DpVKQbWysaDgM= +github.com/aws/aws-sdk-go-v2/credentials v1.3.2/go.mod h1:PACKuTJdt6AlXvEq8rFI4eDmoqDFC5DpVKQbWysaDgM= +github.com/aws/aws-sdk-go-v2/credentials v1.3.3/go.mod h1:oVieKMT3m9BSfqhOfuQ+E0j/yN84ZAJ7Qv8Sfume/ak= github.com/aws/aws-sdk-go-v2/credentials v1.4.2 h1:8kVE4Og6wlhVrMGiORQ3p9gRj2exjzhFRB+QzWBUa5Q= github.com/aws/aws-sdk-go-v2/credentials v1.4.2/go.mod h1:9Sp6u121/f0NnvHyhG7dgoYeUTEFC2vsvJqJ6wXpkaI= +github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0 h1:8kvinmbIDObqsWegKP0JjeanYPiA4GUVpAtciNWE+jw= +github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0/go.mod h1:UVFtSYSWCHj2+brBLDHUdlJXmz8LxUpZhA+Ewypc+xQ= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0/go.mod h1:Mj/U8OpDbcVcoctrYwA2bak8k/HFPdcLzI/vaiXMwuM= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0/go.mod h1:Mj/U8OpDbcVcoctrYwA2bak8k/HFPdcLzI/vaiXMwuM= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.1/go.mod h1:+GTydg3uHmVlQdkRoetz6VHKbOMEYof70m19IpMLifc= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.1 
h1:Nm+BxqBtT0r+AnD6byGMCGT4Km0QwHBy8mAYptNPXY4= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.1/go.mod h1:W1ldHfsgeGlKpJ4xZMKZUI6Wmp6EAstU7PxnhbXWWrI= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.4.0/go.mod h1:eHwXu2+uE/T6gpnYWwBwqoeqRf9IXyCcolyOWDRAErQ= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.3 h1:0O72494cCsazjpsGfo+LXezru6PMSp0HUB1m5UfpaRU= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.3/go.mod h1:claNkz2j/N/AZceFcAbR0NyuWnrn+jCYpI+6Ozjsc0k= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.2/go.mod h1:1QsSZvLUuaQ6VJsCXolYCEzV0mVBkNBp64pIJy9yRks= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.2/go.mod h1:1QsSZvLUuaQ6VJsCXolYCEzV0mVBkNBp64pIJy9yRks= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.4 h1:IM9b6hlCcVFJFydPoyphs/t7YrHfqKy7T4/7AG5Eprs= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.4/go.mod h1:W5gGbtNXFpF9/ssYZTaItzG/B+j0bjTnwStiCP2AtWU= github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.1/go.mod h1:Pv3WenDjI0v2Jl7UaMFIIbPOBbhn33RmmAmGgkXDoqY= github.com/aws/aws-sdk-go-v2/internal/ini v1.2.3 h1:NnXJXUz7oihrSlPKEM0yZ19b+7GQ47MX/LluLlEyE/Y= github.com/aws/aws-sdk-go-v2/internal/ini v1.2.3/go.mod h1:EES9ToeC3h063zCFDdqWGnARExNdULPaBvARm1FLwxA= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.7.0 h1:vXZPcDQg7e5z2IKz0huei6zhfAxDoZdXej2o3jUbjCI= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.7.0/go.mod h1:BlrFkwOhSgESkbdS+zJBy4+1mQ3f3Fq9Gp8nT+gaSwk= +github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.5.2 h1:B120/boLr82yRaQFEPn9u01OwWMnc+xGvz5SOHfBrHY= +github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.5.2/go.mod h1:td1djV1rAzEPcit9L8urGneIi2pYvtI7b/kfMWdpe84= 
+github.com/aws/aws-sdk-go-v2/service/dynamodb v1.5.0 h1:SGwKUQaJudQQZE72dDQlL2FGuHNAEK1CyqKLTjh6mqE= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.5.0/go.mod h1:XY5YhCS9SLul3JSQ08XG/nfxXxrkh6RR21XPq/J//NY= +github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.4.0 h1:QbFWJr2SAyVYvyoOHvJU6sCGLnqNT94ZbWElJMEI1JY= +github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.4.0/go.mod h1:bYsEP8w5YnbYyrx/Zi5hy4hTwRRQISSJS3RWrsGRijg= github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0 h1:+VnEgB1yp+7KlOsk6FXX/v/fU9uL5oSujIMkKQBBmp8= github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0/go.mod h1:/6514fU/SRcY3+ousB1zjUqiXjruSuti2qcfE70osOc= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.2.2/go.mod h1:EASdTcM1lGhUe1/p4gkojHwlGJkeoRjjr1sRCzup3Is= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.2.2/go.mod h1:EASdTcM1lGhUe1/p4gkojHwlGJkeoRjjr1sRCzup3Is= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0 h1:gceOysEWNNwLd6cki65IMBZ4WAM0MwgBQq2n7kejoT8= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0 h1:gceOysEWNNwLd6cki65IMBZ4WAM0MwgBQq2n7kejoT8= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0/go.mod h1:v8ygadNyATSm6elwJ/4gzJwcFhri9RqS8skgHKiwXPU= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0/go.mod h1:v8ygadNyATSm6elwJ/4gzJwcFhri9RqS8skgHKiwXPU= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.0.2/go.mod h1:Gej5xRE+MK0r35OnxJJ07iqQ5JC1avTW/4MwGfsC2io= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.1.0 h1:QCPbsMPMcM4iGbui5SH6O4uxvZffPoBJ4CIGX7dU0l4= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.1.0/go.mod h1:enkU5tq2HoXY+ZMiQprgF3Q83T3PbO77E83yXXzRZWE= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.1/go.mod h1:PISaKWylTYAyruocNk4Lr9miOOJjOcVBd7twCPbydDk= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2/go.mod h1:NXmNI41bdEsJMrD0v9rUvbGCB5GwdBEpKvUvIY3vTFg= 
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2/go.mod h1:NXmNI41bdEsJMrD0v9rUvbGCB5GwdBEpKvUvIY3vTFg= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.3/go.mod h1:7gcsONBmFoCcKrAqrm95trrMd2+C/ReYKP7Vfu8yHHA= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.1 h1:APEjhKZLFlNVLATnA/TJyA+w1r/xd5r5ACWBDZ9aIvc= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.1/go.mod h1:Ve+eJOx9UWaT/lMVebnFhDhO49fSLVedHoA82+Rqme0= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.5.2/go.mod h1:QuL2Ym8BkrLmN4lUofXYq6000/i5jPjosCNK//t6gak= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.1 h1:YEz2KMyqK2zyG3uOa0l2xBc/H6NUVJir8FhwHQHF3rc= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.1/go.mod h1:yg4EN/BKoc7+DLhNOxxdvoO3+iyW2FuynvaKqLcLDUM= +github.com/aws/aws-sdk-go-v2/service/kinesis v1.6.0 h1:hb+NupVMUzINGUCfDs2+YqMkWKu47dBIQHpulM0XWh4= +github.com/aws/aws-sdk-go-v2/service/kinesis v1.6.0/go.mod h1:9O7UG2pELnP0hq35+Gd7XDjOLBkg7tmgRQ0y14ZjoJI= github.com/aws/aws-sdk-go-v2/service/s3 v1.12.0/go.mod h1:6J++A5xpo7QDsIeSqPK4UHqMSyPOCopa+zKtqAMhqVQ= github.com/aws/aws-sdk-go-v2/service/s3 v1.16.0 h1:dt1JQFj/135ozwGIWeCM3aQ8N/kB3Xu3Uu4r9zuOIyc= github.com/aws/aws-sdk-go-v2/service/s3 v1.16.0/go.mod h1:Tk23mCmfL3wb3tNIeMk/0diUZ0W4R6uZtjYKguMLW2s= github.com/aws/aws-sdk-go-v2/service/sso v1.3.2/go.mod h1:J21I6kF+d/6XHVk7kp/cx9YVD2TMD2TbLwtRGVcinXo= +github.com/aws/aws-sdk-go-v2/service/sso v1.3.2/go.mod h1:J21I6kF+d/6XHVk7kp/cx9YVD2TMD2TbLwtRGVcinXo= +github.com/aws/aws-sdk-go-v2/service/sso v1.3.3/go.mod h1:Jgw5O+SK7MZ2Yi9Yvzb4PggAPYaFSliiQuWR0hNjexk= github.com/aws/aws-sdk-go-v2/service/sso v1.4.1 h1:RfgQyv3bFT2Js6XokcrNtTjQ6wAVBRpoCgTFsypihHA= github.com/aws/aws-sdk-go-v2/service/sso v1.4.1/go.mod h1:ycPdbJZlM0BLhuBnd80WX9PucWPG88qps/2jl9HugXs= github.com/aws/aws-sdk-go-v2/service/sts v1.6.1/go.mod h1:hLZ/AnkIKHLuPGjEiyghNEdvJ2PP0MgOxcmv9EBJ4xs= +github.com/aws/aws-sdk-go-v2/service/sts 
v1.6.1/go.mod h1:hLZ/AnkIKHLuPGjEiyghNEdvJ2PP0MgOxcmv9EBJ4xs= +github.com/aws/aws-sdk-go-v2/service/sts v1.6.2/go.mod h1:RBhoMJB8yFToaCnbe0jNq5Dcdy0jp6LhHqg55rjClkM= github.com/aws/aws-sdk-go-v2/service/sts v1.7.1 h1:7ce9ugapSgBapwLhg7AJTqKW5U92VRX3vX65k2tsB+g= github.com/aws/aws-sdk-go-v2/service/sts v1.7.1/go.mod h1:r1i8QwKPzwByXqZb3POQfBs7jozrdnHz8PVbsvyx73w= +github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.3.2 h1:1s/RRA5Owuz4/G/eWCdCKgC+9zaz2vxFsRSwe7R3cPY= +github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.3.2 h1:1s/RRA5Owuz4/G/eWCdCKgC+9zaz2vxFsRSwe7R3cPY= +github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.3.2/go.mod h1:XoDkdZ5pBf2za2GWbFHQ8Ps0K8fRbmbwrHh7PF5xnzQ= +github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.3.2/go.mod h1:XoDkdZ5pBf2za2GWbFHQ8Ps0K8fRbmbwrHh7PF5xnzQ= +github.com/aws/smithy-go v1.0.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw= github.com/aws/smithy-go v1.0.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw= github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aws/smithy-go v1.8.0 h1:AEwwwXQZtUwP5Mz506FeXXrKBe0jA8gVM+1gEcSRooc= +github.com/aws/smithy-go v1.8.0 h1:AEwwwXQZtUwP5Mz506FeXXrKBe0jA8gVM+1gEcSRooc= +github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f h1:Pf0BjJDga7C98f0vhw+Ip5EaiE07S3lTKpIYPNS0nMo= +github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f/go.mod h1:SghidfnxvX7ribW6nHI7T+IBbc9puZ9kk5Tx/88h8P4= +github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I= +github.com/benbjohnson/clock 
v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -580,6 +643,7 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-rendezvous v0.0.0-20200624174652-8d2f3be8b2d9/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/digitalocean/godo v1.58.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= @@ -819,6 +883,7 @@ github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3yg github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg= github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= +github.com/go-redis/redis/v8 v8.0.0-beta.6/go.mod h1:g79Vpae8JMzg5qjk8BiwU9tK+HmU3iDVyS4UAJLFycI= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod 
h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= @@ -954,6 +1019,7 @@ github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZ github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= github.com/golangci/revgrep v0.0.0-20210208091834-cd28932614b5/go.mod h1:LK+zW4MpyytAWQRz0M4xnzEk50lSvqDQKfx304apFkY= github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= +github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/google/addlicense v0.0.0-20200906110928-a0294312aa76/go.mod h1:EMjYTRimagHs1FwlIqKyX3wAM0u3rA+McvlIIWmSamA= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -1070,13 +1136,13 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFb github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= -github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 h1:U0KvGD9CJIl1nbgu9yLsfWxMT6WqL8fG0IBB7RvOZZQ= -github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0/go.mod h1:dk23l2BruuUzRP8wbybQbPn3J7sZga2QHICCeaEy5rQ= +github.com/harlow/kinesis-consumer v0.3.6-0.20210911031324-5a873d6e9fec h1:ya+kv1eNnd5QhcHuaj5g5eMq5Ra3VCNaPY2ZI7Aq91o= +github.com/harlow/kinesis-consumer v0.3.6-0.20210911031324-5a873d6e9fec/go.mod h1:FIT1uhdVv2iXO0l6aACPZSVHxdth7RdmoT34jk9MEm0= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api 
v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/api v1.8.1/go.mod h1:sDjTOq0yUyv5G4h+BqSea7Fn6BU+XbolEz1952UB+mk= -github.com/hashicorp/consul/api v1.11.0 h1:Hw/G8TtRvOElqxVIhBzXciiSTbapq8hZ2XKZsXk5ZCE= -github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= +github.com/hashicorp/consul/api v1.9.1 h1:SngrdG2L62qqLsUz85qcPhFZ78rPf8tcD5qjMgs6MME= +github.com/hashicorp/consul/api v1.9.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.7.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= @@ -1344,6 +1410,7 @@ github.com/leoluk/perflib_exporter v0.1.0/go.mod h1:rpV0lYj7lemdTm31t7zpCqYqPnw7 github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.7.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.9.0 h1:L8nSXQQzAYByakOFMTwpjRoHsMJklur4Gi59b6VivR8= github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= @@ -1415,6 +1482,7 @@ github.com/mdlayher/netlink v1.1.0 h1:mpdLgm+brq10nI9zM1BpX1kpDbh3NLl3RSnVq6ZSkf github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= github.com/mgechev/revive v1.0.3/go.mod h1:POGGZagSo/0frdr7VeAifzS5Uka0d0GPiM35MsTO8nE= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod 
h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/microsoft/ApplicationInsights-Go v0.4.4 h1:G4+H9WNs6ygSCe6sUyxRc2U81TI5Es90b2t/MwX5KqY= github.com/microsoft/ApplicationInsights-Go v0.4.4/go.mod h1:fKRUseBqkw6bDiXTs3ESTiU/4YTIHsQS4W3fP2ieF4U= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= @@ -1609,6 +1677,7 @@ github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKw github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.1-0.20190913142402-a7454ce5950e/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5 h1:ZCnq+JUrvXcDVhX/xRolRBZifmabN1HcS1wrPSvxhrU= @@ -1767,6 +1836,7 @@ github.com/robertkrimen/otto v0.0.0-20191219234010-c382bd3c16ff/go.mod h1:xvqspo github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -1846,11 +1916,14 @@ 
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sleepinggenius2/gosmi v0.4.3 h1:99Zwzy1Cvgsh396sw07oR2G4ab88ILGZFMxSlGWnR6o= github.com/sleepinggenius2/gosmi v0.4.3/go.mod h1:l8OniPmd3bJzw0MXP2/qh7AhP/e+bTY2CNivIhsnDT0= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.0.0 h1:UVQPSSmc3qtTi+zPPkCXvZX9VvW/xT/NsRvKfwY81a8= +github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs= github.com/snowflakedb/gosnowflake v1.6.2 h1:drZkX7Ve3qr3lLD/f0vxwesgJZfNerivknAvPRAMy88= github.com/snowflakedb/gosnowflake v1.6.2/go.mod h1:k1Wq+O8dRD/jmFBLyStEv2OrgHoMFQpqHCRSy70P0dI= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= @@ -1934,6 +2007,12 @@ github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiff github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.6 h1:i+SbKraHhnrf9M5MYmvQhFnbLhAXSDWF8WWsuyRdocw= github.com/tinylib/msgp v1.1.6/go.mod h1:75BAfg2hauQhs3qedfdDZmWAPcFMAvJE5b9rGOMufyw= +github.com/tj/assert 
v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0= +github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk= +github.com/tj/go-buffer v1.0.1/go.mod h1:iyiJpfFcR2B9sXu7KvjbT9fpM4mOelRSDTbntVj52Uc= +github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0= +github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao= +github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= github.com/tklauser/go-sysconf v0.3.9 h1:JeUVdAOWhhxVcU6Eqr/ATFHgXk/mmiItdKeJPev3vTo= github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= @@ -2020,8 +2099,8 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 h1:f6CCNiTjQZ0uWK4jPwhwYB8QIGGfn0ssD9kVzRUUUpk= -github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU= +github.com/yuin/gopher-lua v0.0.0-20200603152657-dc2b0ca8b37e h1:oIpIX9VKxSCFrfjsKpluGbNPBGq9iNnT9crH781j9wY= +github.com/yuin/gopher-lua v0.0.0-20200603152657-dc2b0ca8b37e/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go 
v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= @@ -2064,6 +2143,7 @@ go.opentelemetry.io/collector v0.28.0 h1:XmRwoSj3HZtC7O/12fBoQ9DInvwBwFHgHLZrwNx go.opentelemetry.io/collector v0.28.0/go.mod h1:AP/BTXwo1eedoJO7V+HQ68CSvJU1lcdqOzJCgt1VsNs= go.opentelemetry.io/collector/model v0.35.0 h1:NpKjghiqlei4ecwjOYOMhD6tj4gY8yiWHPJmbFs/ArI= go.opentelemetry.io/collector/model v0.35.0/go.mod h1:+7YCSjJG+MqiIFjauzt7oM2qkqBsaJWh5hcsO4fwsAc= +go.opentelemetry.io/otel v0.7.0/go.mod h1:aZMyHG5TqDOXEgH2tyLiXSUKly1jT3yqE9PmrzIeCdo= go.opentelemetry.io/otel v1.0.0-RC3 h1:kvwiyEkiUT/JaadXzVLI/R1wDO934A7r3Bs2wEe6wqA= go.opentelemetry.io/otel v1.0.0-RC3/go.mod h1:Ka5j3ua8tZs4Rkq4Ex3hwgBgOchyPVq5S6P2lz//nKQ= go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.23.0 h1:vKIEsT6IJU0NYd+iZccjgCmk80zsa7dTiC2Bu7U1jz0= @@ -2119,6 +2199,7 @@ golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -2164,8 +2245,9 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod 
h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5 h1:FR+oGxGfbQu1d+jglI3rCkjAjUnhRSZcUxr+DqlDLNo= golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= +golang.org/x/exp v0.0.0-20200513190911-00229845015e h1:rMqLP+9XLy+LdbCXHjJHAmTfXCr93W7oruWA6Hq1Alc= +golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -2362,6 +2444,7 @@ golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191003212358-c178f38b412c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2670,6 +2753,7 @@ google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto 
v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191009194640-548a555dbc03/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= @@ -2839,6 +2923,7 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go index 3fb86310946e1..17305f31c93a6 100644 --- a/plugins/inputs/cloudwatch/cloudwatch.go +++ b/plugins/inputs/cloudwatch/cloudwatch.go @@ -1,6 +1,7 @@ package cloudwatch import ( + "context" "fmt" "net" "net/http" @@ -9,8 +10,9 @@ import ( "sync" "time" - "github.com/aws/aws-sdk-go/aws" - cwClient "github.com/aws/aws-sdk-go/service/cloudwatch" + "github.com/aws/aws-sdk-go-v2/aws" + cwClient "github.com/aws/aws-sdk-go-v2/service/cloudwatch" + "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" @@ 
-23,6 +25,14 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +const ( + StatisticAverage = "Average" + StatisticMaximum = "Maximum" + StatisticMinimum = "Minimum" + StatisticSum = "Sum" + StatisticSampleCount = "SampleCount" +) + // CloudWatch contains the configuration and cache for the cloudwatch plugin. type CloudWatch struct { StatisticExclude []string `toml:"statistic_exclude"` @@ -72,12 +82,12 @@ type metricCache struct { ttl time.Duration built time.Time metrics []filteredMetric - queries map[string][]*cwClient.MetricDataQuery + queries map[string][]types.MetricDataQuery } type cloudwatchClient interface { - ListMetrics(*cwClient.ListMetricsInput) (*cwClient.ListMetricsOutput, error) - GetMetricData(*cwClient.GetMetricDataInput) (*cwClient.GetMetricDataOutput, error) + ListMetrics(context.Context, *cwClient.ListMetricsInput, ...func(*cwClient.Options)) (*cwClient.ListMetricsOutput, error) + GetMetricData(context.Context, *cwClient.GetMetricDataInput, ...func(*cwClient.Options)) (*cwClient.GetMetricDataOutput, error) } // SampleConfig returns the default configuration of the Cloudwatch input plugin. @@ -227,12 +237,12 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { wg := sync.WaitGroup{} rLock := sync.Mutex{} - results := map[string][]*cwClient.MetricDataResult{} + results := map[string][]types.MetricDataResult{} for namespace, namespacedQueries := range queries { // 500 is the maximum number of metric data queries a `GetMetricData` request can contain. 
batchSize := 500 - var batches [][]*cwClient.MetricDataQuery + var batches [][]types.MetricDataQuery for batchSize < len(namespacedQueries) { namespacedQueries, batches = namespacedQueries[batchSize:], append(batches, namespacedQueries[0:batchSize:batchSize]) @@ -242,7 +252,7 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { for i := range batches { wg.Add(1) <-lmtr.C - go func(n string, inm []*cwClient.MetricDataQuery) { + go func(n string, inm []types.MetricDataQuery) { defer wg.Done() result, err := c.gatherMetrics(c.getDataInputs(inm)) if err != nil { @@ -268,8 +278,15 @@ func (c *CloudWatch) initializeCloudWatch() error { return err } - cfg := &aws.Config{ - HTTPClient: &http.Client{ + cfg, err := c.CredentialConfig.Credentials() + if err != nil { + return err + } + c.client = cwClient.NewFromConfig(cfg, func(options *cwClient.Options) { + // Disable logging + options.ClientLogMode = 0 + + options.HTTPClient = &http.Client{ // use values from DefaultTransport Transport: &http.Transport{ Proxy: proxy, @@ -284,15 +301,8 @@ func (c *CloudWatch) initializeCloudWatch() error { ExpectContinueTimeout: 1 * time.Second, }, Timeout: time.Duration(c.Timeout), - }, - } - - loglevel := aws.LogOff - p, err := c.CredentialConfig.Credentials() - if err != nil { - return err - } - c.client = cwClient.New(p, cfg.WithLogLevel(loglevel)) + } + }) // Initialize regex matchers for each Dimension value. 
for _, m := range c.Metrics { @@ -310,7 +320,7 @@ func (c *CloudWatch) initializeCloudWatch() error { } type filteredMetric struct { - metrics []*cwClient.Metric + metrics []types.Metric statFilter filter.Filter } @@ -325,18 +335,18 @@ func getFilteredMetrics(c *CloudWatch) ([]filteredMetric, error) { // check for provided metric filter if c.Metrics != nil { for _, m := range c.Metrics { - metrics := []*cwClient.Metric{} + metrics := []types.Metric{} if !hasWildcard(m.Dimensions) { - dimensions := make([]*cwClient.Dimension, len(m.Dimensions)) + dimensions := make([]types.Dimension, len(m.Dimensions)) for k, d := range m.Dimensions { - dimensions[k] = &cwClient.Dimension{ + dimensions[k] = types.Dimension{ Name: aws.String(d.Name), Value: aws.String(d.Value), } } for _, name := range m.MetricNames { for _, namespace := range c.Namespaces { - metrics = append(metrics, &cwClient.Metric{ + metrics = append(metrics, types.Metric{ Namespace: aws.String(namespace), MetricName: aws.String(name), Dimensions: dimensions, @@ -352,7 +362,7 @@ func getFilteredMetrics(c *CloudWatch) ([]filteredMetric, error) { for _, metric := range allMetrics { if isSelected(name, metric, m.Dimensions) { for _, namespace := range c.Namespaces { - metrics = append(metrics, &cwClient.Metric{ + metrics = append(metrics, types.Metric{ Namespace: aws.String(namespace), MetricName: aws.String(name), Dimensions: metric.Dimensions, @@ -401,32 +411,24 @@ func getFilteredMetrics(c *CloudWatch) ([]filteredMetric, error) { } // fetchNamespaceMetrics retrieves available metrics for a given CloudWatch namespace. 
-func (c *CloudWatch) fetchNamespaceMetrics() ([]*cwClient.Metric, error) { - metrics := []*cwClient.Metric{} +func (c *CloudWatch) fetchNamespaceMetrics() ([]types.Metric, error) { + metrics := []types.Metric{} var token *string - var params *cwClient.ListMetricsInput - var recentlyActive *string - - switch c.RecentlyActive { - case "PT3H": - recentlyActive = &c.RecentlyActive - default: - recentlyActive = nil + + params := &cwClient.ListMetricsInput{ + Dimensions: []types.DimensionFilter{}, + NextToken: token, + MetricName: nil, + } + if c.RecentlyActive == "PT3H" { + params.RecentlyActive = types.RecentlyActivePt3h } for _, namespace := range c.Namespaces { - - params = &cwClient.ListMetricsInput{ - Dimensions: []*cwClient.DimensionFilter{}, - NextToken: token, - MetricName: nil, - RecentlyActive: recentlyActive, - Namespace: aws.String(namespace), - } - + params.Namespace = aws.String(namespace) for { - resp, err := c.client.ListMetrics(params) + resp, err := c.client.ListMetrics(context.Background(), params) if err != nil { return nil, fmt.Errorf("failed to list metrics with params per namespace: %v", err) } @@ -457,75 +459,75 @@ func (c *CloudWatch) updateWindow(relativeTo time.Time) { } // getDataQueries gets all of the possible queries so we can maximize the request payload. 
-func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) map[string][]*cwClient.MetricDataQuery { +func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) map[string][]types.MetricDataQuery { if c.metricCache != nil && c.metricCache.queries != nil && c.metricCache.isValid() { return c.metricCache.queries } c.queryDimensions = map[string]*map[string]string{} - dataQueries := map[string][]*cwClient.MetricDataQuery{} + dataQueries := map[string][]types.MetricDataQuery{} for i, filtered := range filteredMetrics { for j, metric := range filtered.metrics { id := strconv.Itoa(j) + "_" + strconv.Itoa(i) dimension := ctod(metric.Dimensions) if filtered.statFilter.Match("average") { c.queryDimensions["average_"+id] = dimension - dataQueries[*metric.Namespace] = append(dataQueries[*metric.Namespace], &cwClient.MetricDataQuery{ + dataQueries[*metric.Namespace] = append(dataQueries[*metric.Namespace], types.MetricDataQuery{ Id: aws.String("average_" + id), Label: aws.String(snakeCase(*metric.MetricName + "_average")), - MetricStat: &cwClient.MetricStat{ - Metric: metric, - Period: aws.Int64(int64(time.Duration(c.Period).Seconds())), - Stat: aws.String(cwClient.StatisticAverage), + MetricStat: &types.MetricStat{ + Metric: &metric, + Period: aws.Int32(int32(time.Duration(c.Period).Seconds())), + Stat: aws.String(StatisticAverage), }, }) } if filtered.statFilter.Match("maximum") { c.queryDimensions["maximum_"+id] = dimension - dataQueries[*metric.Namespace] = append(dataQueries[*metric.Namespace], &cwClient.MetricDataQuery{ + dataQueries[*metric.Namespace] = append(dataQueries[*metric.Namespace], types.MetricDataQuery{ Id: aws.String("maximum_" + id), Label: aws.String(snakeCase(*metric.MetricName + "_maximum")), - MetricStat: &cwClient.MetricStat{ - Metric: metric, - Period: aws.Int64(int64(time.Duration(c.Period).Seconds())), - Stat: aws.String(cwClient.StatisticMaximum), + MetricStat: &types.MetricStat{ + Metric: &metric, + Period: 
aws.Int32(int32(time.Duration(c.Period).Seconds())), + Stat: aws.String(StatisticMaximum), }, }) } if filtered.statFilter.Match("minimum") { c.queryDimensions["minimum_"+id] = dimension - dataQueries[*metric.Namespace] = append(dataQueries[*metric.Namespace], &cwClient.MetricDataQuery{ + dataQueries[*metric.Namespace] = append(dataQueries[*metric.Namespace], types.MetricDataQuery{ Id: aws.String("minimum_" + id), Label: aws.String(snakeCase(*metric.MetricName + "_minimum")), - MetricStat: &cwClient.MetricStat{ - Metric: metric, - Period: aws.Int64(int64(time.Duration(c.Period).Seconds())), - Stat: aws.String(cwClient.StatisticMinimum), + MetricStat: &types.MetricStat{ + Metric: &metric, + Period: aws.Int32(int32(time.Duration(c.Period).Seconds())), + Stat: aws.String(StatisticMinimum), }, }) } if filtered.statFilter.Match("sum") { c.queryDimensions["sum_"+id] = dimension - dataQueries[*metric.Namespace] = append(dataQueries[*metric.Namespace], &cwClient.MetricDataQuery{ + dataQueries[*metric.Namespace] = append(dataQueries[*metric.Namespace], types.MetricDataQuery{ Id: aws.String("sum_" + id), Label: aws.String(snakeCase(*metric.MetricName + "_sum")), - MetricStat: &cwClient.MetricStat{ - Metric: metric, - Period: aws.Int64(int64(time.Duration(c.Period).Seconds())), - Stat: aws.String(cwClient.StatisticSum), + MetricStat: &types.MetricStat{ + Metric: &metric, + Period: aws.Int32(int32(time.Duration(c.Period).Seconds())), + Stat: aws.String(StatisticSum), }, }) } if filtered.statFilter.Match("sample_count") { c.queryDimensions["sample_count_"+id] = dimension - dataQueries[*metric.Namespace] = append(dataQueries[*metric.Namespace], &cwClient.MetricDataQuery{ + dataQueries[*metric.Namespace] = append(dataQueries[*metric.Namespace], types.MetricDataQuery{ Id: aws.String("sample_count_" + id), Label: aws.String(snakeCase(*metric.MetricName + "_sample_count")), - MetricStat: &cwClient.MetricStat{ - Metric: metric, - Period: 
aws.Int64(int64(time.Duration(c.Period).Seconds())), - Stat: aws.String(cwClient.StatisticSampleCount), + MetricStat: &types.MetricStat{ + Metric: &metric, + Period: aws.Int32(int32(time.Duration(c.Period).Seconds())), + Stat: aws.String(StatisticSampleCount), }, }) } @@ -553,11 +555,11 @@ func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) map[string // gatherMetrics gets metric data from Cloudwatch. func (c *CloudWatch) gatherMetrics( params *cwClient.GetMetricDataInput, -) ([]*cwClient.MetricDataResult, error) { - results := []*cwClient.MetricDataResult{} +) ([]types.MetricDataResult, error) { + results := []types.MetricDataResult{} for { - resp, err := c.client.GetMetricData(params) + resp, err := c.client.GetMetricData(context.Background(), params) if err != nil { return nil, fmt.Errorf("failed to get metric data: %v", err) } @@ -574,7 +576,7 @@ func (c *CloudWatch) gatherMetrics( func (c *CloudWatch) aggregateMetrics( acc telegraf.Accumulator, - metricDataResults map[string][]*cwClient.MetricDataResult, + metricDataResults map[string][]types.MetricDataResult, ) error { var ( grouper = internalMetric.NewSeriesGrouper() @@ -592,7 +594,7 @@ func (c *CloudWatch) aggregateMetrics( tags["region"] = c.Region for i := range result.Values { - if err := grouper.Add(namespace, tags, *result.Timestamps[i], *result.Label, *result.Values[i]); err != nil { + if err := grouper.Add(namespace, tags, result.Timestamps[i], *result.Label, result.Values[i]); err != nil { acc.AddError(err) } } @@ -635,7 +637,7 @@ func snakeCase(s string) string { } // ctod converts cloudwatch dimensions to regular dimensions. 
-func ctod(cDimensions []*cwClient.Dimension) *map[string]string { +func ctod(cDimensions []types.Dimension) *map[string]string { dimensions := map[string]string{} for i := range cDimensions { dimensions[snakeCase(*cDimensions[i].Name)] = *cDimensions[i].Value @@ -643,7 +645,7 @@ func ctod(cDimensions []*cwClient.Dimension) *map[string]string { return &dimensions } -func (c *CloudWatch) getDataInputs(dataQueries []*cwClient.MetricDataQuery) *cwClient.GetMetricDataInput { +func (c *CloudWatch) getDataInputs(dataQueries []types.MetricDataQuery) *cwClient.GetMetricDataInput { return &cwClient.GetMetricDataInput{ StartTime: aws.Time(c.windowStart), EndTime: aws.Time(c.windowEnd), @@ -665,7 +667,7 @@ func hasWildcard(dimensions []*Dimension) bool { return false } -func isSelected(name string, metric *cwClient.Metric, dimensions []*Dimension) bool { +func isSelected(name string, metric types.Metric, dimensions []*Dimension) bool { if name != *metric.MetricName { return false } diff --git a/plugins/inputs/cloudwatch/cloudwatch_test.go b/plugins/inputs/cloudwatch/cloudwatch_test.go index 3114240ec77a9..9672ff88a2c1b 100644 --- a/plugins/inputs/cloudwatch/cloudwatch_test.go +++ b/plugins/inputs/cloudwatch/cloudwatch_test.go @@ -1,12 +1,14 @@ package cloudwatch import ( + "context" "net/http" "testing" "time" - "github.com/aws/aws-sdk-go/aws" - cwClient "github.com/aws/aws-sdk-go/service/cloudwatch" + "github.com/aws/aws-sdk-go-v2/aws" + cwClient "github.com/aws/aws-sdk-go-v2/service/cloudwatch" + "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/config" @@ -18,13 +20,13 @@ import ( type mockGatherCloudWatchClient struct{} -func (m *mockGatherCloudWatchClient) ListMetrics(params *cwClient.ListMetricsInput) (*cwClient.ListMetricsOutput, error) { +func (m *mockGatherCloudWatchClient) ListMetrics(_ context.Context, params *cwClient.ListMetricsInput, _ ...func(*cwClient.Options)) 
(*cwClient.ListMetricsOutput, error) { return &cwClient.ListMetricsOutput{ - Metrics: []*cwClient.Metric{ + Metrics: []types.Metric{ { Namespace: params.Namespace, MetricName: aws.String("Latency"), - Dimensions: []*cwClient.Dimension{ + Dimensions: []types.Dimension{ { Name: aws.String("LoadBalancerName"), Value: aws.String("p-example"), @@ -35,63 +37,53 @@ func (m *mockGatherCloudWatchClient) ListMetrics(params *cwClient.ListMetricsInp }, nil } -func (m *mockGatherCloudWatchClient) GetMetricData(params *cwClient.GetMetricDataInput) (*cwClient.GetMetricDataOutput, error) { +func (m *mockGatherCloudWatchClient) GetMetricData(_ context.Context, params *cwClient.GetMetricDataInput, _ ...func(*cwClient.Options)) (*cwClient.GetMetricDataOutput, error) { return &cwClient.GetMetricDataOutput{ - MetricDataResults: []*cwClient.MetricDataResult{ + MetricDataResults: []types.MetricDataResult{ { Id: aws.String("minimum_0_0"), Label: aws.String("latency_minimum"), - StatusCode: aws.String("completed"), - Timestamps: []*time.Time{ - params.EndTime, - }, - Values: []*float64{ - aws.Float64(0.1), + StatusCode: types.StatusCodeComplete, + Timestamps: []time.Time{ + *params.EndTime, }, + Values: []float64{0.1}, }, { Id: aws.String("maximum_0_0"), Label: aws.String("latency_maximum"), - StatusCode: aws.String("completed"), - Timestamps: []*time.Time{ - params.EndTime, - }, - Values: []*float64{ - aws.Float64(0.3), + StatusCode: types.StatusCodeComplete, + Timestamps: []time.Time{ + *params.EndTime, }, + Values: []float64{0.3}, }, { Id: aws.String("average_0_0"), Label: aws.String("latency_average"), - StatusCode: aws.String("completed"), - Timestamps: []*time.Time{ - params.EndTime, - }, - Values: []*float64{ - aws.Float64(0.2), + StatusCode: types.StatusCodeComplete, + Timestamps: []time.Time{ + *params.EndTime, }, + Values: []float64{0.2}, }, { Id: aws.String("sum_0_0"), Label: aws.String("latency_sum"), - StatusCode: aws.String("completed"), - Timestamps: []*time.Time{ - 
params.EndTime, - }, - Values: []*float64{ - aws.Float64(123), + StatusCode: types.StatusCodeComplete, + Timestamps: []time.Time{ + *params.EndTime, }, + Values: []float64{123}, }, { Id: aws.String("sample_count_0_0"), Label: aws.String("latency_sample_count"), - StatusCode: aws.String("completed"), - Timestamps: []*time.Time{ - params.EndTime, - }, - Values: []*float64{ - aws.Float64(100), + StatusCode: types.StatusCodeComplete, + Timestamps: []time.Time{ + *params.EndTime, }, + Values: []float64{100}, }, }, }, nil @@ -158,8 +150,8 @@ func TestGather_MultipleNamespaces(t *testing.T) { type mockSelectMetricsCloudWatchClient struct{} -func (m *mockSelectMetricsCloudWatchClient) ListMetrics(_ *cwClient.ListMetricsInput) (*cwClient.ListMetricsOutput, error) { - metrics := []*cwClient.Metric{} +func (m *mockSelectMetricsCloudWatchClient) ListMetrics(_ context.Context, params *cwClient.ListMetricsInput, _ ...func(*cwClient.Options)) (*cwClient.ListMetricsOutput, error) { + metrics := []types.Metric{} // 4 metrics are available metricNames := []string{"Latency", "RequestCount", "HealthyHostCount", "UnHealthyHostCount"} // for 3 ELBs @@ -169,10 +161,10 @@ func (m *mockSelectMetricsCloudWatchClient) ListMetrics(_ *cwClient.ListMetricsI for _, m := range metricNames { for _, lb := range loadBalancers { // For each metric/ELB pair, we get an aggregate value across all AZs. - metrics = append(metrics, &cwClient.Metric{ + metrics = append(metrics, types.Metric{ Namespace: aws.String("AWS/ELB"), MetricName: aws.String(m), - Dimensions: []*cwClient.Dimension{ + Dimensions: []types.Dimension{ { Name: aws.String("LoadBalancerName"), Value: aws.String(lb), @@ -181,10 +173,10 @@ func (m *mockSelectMetricsCloudWatchClient) ListMetrics(_ *cwClient.ListMetricsI }) for _, az := range availabilityZones { // We get a metric for each metric/ELB/AZ triplet. 
- metrics = append(metrics, &cwClient.Metric{ + metrics = append(metrics, types.Metric{ Namespace: aws.String("AWS/ELB"), MetricName: aws.String(m), - Dimensions: []*cwClient.Dimension{ + Dimensions: []types.Dimension{ { Name: aws.String("LoadBalancerName"), Value: aws.String(lb), @@ -205,7 +197,7 @@ func (m *mockSelectMetricsCloudWatchClient) ListMetrics(_ *cwClient.ListMetricsI return result, nil } -func (m *mockSelectMetricsCloudWatchClient) GetMetricData(_ *cwClient.GetMetricDataInput) (*cwClient.GetMetricDataOutput, error) { +func (m *mockSelectMetricsCloudWatchClient) GetMetricData(_ context.Context, params *cwClient.GetMetricDataInput, _ ...func(*cwClient.Options)) (*cwClient.GetMetricDataOutput, error) { return nil, nil } @@ -246,16 +238,16 @@ func TestSelectMetrics(t *testing.T) { } func TestGenerateStatisticsInputParams(t *testing.T) { - d := &cwClient.Dimension{ + d := types.Dimension{ Name: aws.String("LoadBalancerName"), Value: aws.String("p-example"), } namespace := "AWS/ELB" - m := &cwClient.Metric{ + m := types.Metric{ MetricName: aws.String("Latency"), - Dimensions: []*cwClient.Dimension{d}, - Namespace: &namespace, + Dimensions: []types.Dimension{d}, + Namespace: aws.String(namespace), } duration, _ := time.ParseDuration("1m") @@ -274,7 +266,7 @@ func TestGenerateStatisticsInputParams(t *testing.T) { c.updateWindow(now) statFilter, _ := filter.NewIncludeExcludeFilter(nil, nil) - queries := c.getDataQueries([]filteredMetric{{metrics: []*cwClient.Metric{m}, statFilter: statFilter}}) + queries := c.getDataQueries([]filteredMetric{{metrics: []types.Metric{m}, statFilter: statFilter}}) params := c.getDataInputs(queries[namespace]) require.EqualValues(t, *params.EndTime, now.Add(-time.Duration(c.Delay))) @@ -285,16 +277,16 @@ func TestGenerateStatisticsInputParams(t *testing.T) { } func TestGenerateStatisticsInputParamsFiltered(t *testing.T) { - d := &cwClient.Dimension{ + d := types.Dimension{ Name: aws.String("LoadBalancerName"), Value: 
aws.String("p-example"), } namespace := "AWS/ELB" - m := &cwClient.Metric{ + m := types.Metric{ MetricName: aws.String("Latency"), - Dimensions: []*cwClient.Dimension{d}, - Namespace: &namespace, + Dimensions: []types.Dimension{d}, + Namespace: aws.String(namespace), } duration, _ := time.ParseDuration("1m") @@ -313,7 +305,7 @@ func TestGenerateStatisticsInputParamsFiltered(t *testing.T) { c.updateWindow(now) statFilter, _ := filter.NewIncludeExcludeFilter([]string{"average", "sample_count"}, nil) - queries := c.getDataQueries([]filteredMetric{{metrics: []*cwClient.Metric{m}, statFilter: statFilter}}) + queries := c.getDataQueries([]filteredMetric{{metrics: []types.Metric{m}, statFilter: statFilter}}) params := c.getDataInputs(queries[namespace]) require.EqualValues(t, *params.EndTime, now.Add(-time.Duration(c.Delay))) diff --git a/plugins/inputs/kinesis_consumer/kinesis_consumer.go b/plugins/inputs/kinesis_consumer/kinesis_consumer.go index 88b5fef660112..4ff66ed1d2aaf 100644 --- a/plugins/inputs/kinesis_consumer/kinesis_consumer.go +++ b/plugins/inputs/kinesis_consumer/kinesis_consumer.go @@ -12,10 +12,10 @@ import ( "sync" "time" - "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/aws/aws-sdk-go/service/kinesis" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "github.com/aws/aws-sdk-go-v2/service/kinesis" consumer "github.com/harlow/kinesis-consumer" - "github.com/harlow/kinesis-consumer/checkpoint/ddb" + "github.com/harlow/kinesis-consumer/store/ddb" "github.com/influxdata/telegraf" internalaws "github.com/influxdata/telegraf/config/aws" @@ -44,7 +44,7 @@ type ( acc telegraf.TrackingAccumulator sem chan struct{} - checkpoint consumer.Checkpoint + checkpoint consumer.Store checkpoints map[string]checkpoint records map[telegraf.TrackingID]string checkpointTex sync.Mutex @@ -153,31 +153,19 @@ func (k *KinesisConsumer) SetParser(parser parsers.Parser) { } func (k *KinesisConsumer) connect(ac telegraf.Accumulator) error { - p, err := 
k.CredentialConfig.Credentials() + cfg, err := k.CredentialConfig.Credentials() if err != nil { return err } - client := kinesis.New(p) + client := kinesis.NewFromConfig(cfg) - k.checkpoint = &noopCheckpoint{} + k.checkpoint = &noopStore{} if k.DynamoDB != nil { - p, err := (&internalaws.CredentialConfig{ - Region: k.Region, - AccessKey: k.AccessKey, - SecretKey: k.SecretKey, - RoleARN: k.RoleARN, - Profile: k.Profile, - Filename: k.Filename, - Token: k.Token, - EndpointURL: k.EndpointURL, - }).Credentials() - if err != nil { - return err - } + var err error k.checkpoint, err = ddb.New( k.DynamoDB.AppName, k.DynamoDB.TableName, - ddb.WithDynamoClient(dynamodb.New(p)), + ddb.WithDynamoClient(dynamodb.NewFromConfig(cfg)), ddb.WithMaxInterval(time.Second*10), ) if err != nil { @@ -189,7 +177,7 @@ func (k *KinesisConsumer) connect(ac telegraf.Accumulator) error { k.StreamName, consumer.WithClient(client), consumer.WithShardIteratorType(k.ShardIteratorType), - consumer.WithCheckpoint(k), + consumer.WithStore(k), ) if err != nil { return err @@ -214,10 +202,10 @@ func (k *KinesisConsumer) connect(ac telegraf.Accumulator) error { k.wg.Add(1) go func() { defer k.wg.Done() - err := k.cons.Scan(ctx, func(r *consumer.Record) consumer.ScanStatus { + err := k.cons.Scan(ctx, func(r *consumer.Record) error { select { case <-ctx.Done(): - return consumer.ScanStatus{Error: ctx.Err()} + return ctx.Err() case k.sem <- struct{}{}: break } @@ -227,7 +215,7 @@ func (k *KinesisConsumer) connect(ac telegraf.Accumulator) error { k.Log.Errorf("Scan parser error: %s", err.Error()) } - return consumer.ScanStatus{} + return nil }) if err != nil { k.cancel() @@ -298,7 +286,7 @@ func (k *KinesisConsumer) onDelivery(ctx context.Context) { } k.lastSeqNum = strToBint(sequenceNum) - if err := k.checkpoint.Set(chk.streamName, chk.shardID, sequenceNum); err != nil { + if err := k.checkpoint.SetCheckpoint(chk.streamName, chk.shardID, sequenceNum); err != nil { k.Log.Debug("Setting checkpoint failed: 
%v", err) } } else { @@ -332,13 +320,13 @@ func (k *KinesisConsumer) Gather(acc telegraf.Accumulator) error { return nil } -// Get wraps the checkpoint's Get function (called by consumer library) -func (k *KinesisConsumer) Get(streamName, shardID string) (string, error) { - return k.checkpoint.Get(streamName, shardID) +// Get wraps the checkpoint's GetCheckpoint function (called by consumer library) +func (k *KinesisConsumer) GetCheckpoint(streamName, shardID string) (string, error) { + return k.checkpoint.GetCheckpoint(streamName, shardID) } -// Set wraps the checkpoint's Set function (called by consumer library) -func (k *KinesisConsumer) Set(streamName, shardID, sequenceNumber string) error { +// Set wraps the checkpoint's SetCheckpoint function (called by consumer library) +func (k *KinesisConsumer) SetCheckpoint(streamName, shardID, sequenceNumber string) error { if sequenceNumber == "" { return fmt.Errorf("sequence number should not be empty") } @@ -390,10 +378,10 @@ func (k *KinesisConsumer) Init() error { return k.configureProcessContentEncodingFunc() } -type noopCheckpoint struct{} +type noopStore struct{} -func (n noopCheckpoint) Set(string, string, string) error { return nil } -func (n noopCheckpoint) Get(string, string) (string, error) { return "", nil } +func (n noopStore) SetCheckpoint(string, string, string) error { return nil } +func (n noopStore) GetCheckpoint(string, string) (string, error) { return "", nil } func init() { negOne, _ = new(big.Int).SetString("-1", 10) diff --git a/plugins/inputs/kinesis_consumer/kinesis_consumer_test.go b/plugins/inputs/kinesis_consumer/kinesis_consumer_test.go index b8becece054fc..6d52f07835e6b 100644 --- a/plugins/inputs/kinesis_consumer/kinesis_consumer_test.go +++ b/plugins/inputs/kinesis_consumer/kinesis_consumer_test.go @@ -2,7 +2,8 @@ package kinesis_consumer import ( "encoding/base64" - "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go-v2/aws" + 
"github.com/aws/aws-sdk-go-v2/service/kinesis/types" consumer "github.com/harlow/kinesis-consumer" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/parsers" @@ -53,7 +54,12 @@ func TestKinesisConsumer_onMessage(t *testing.T) { records: make(map[telegraf.TrackingID]string), }, args: args{ - r: &consumer.Record{Data: notZippedBytes, SequenceNumber: aws.String("anything")}, + r: &consumer.Record{ + Record: types.Record{ + Data: notZippedBytes, + SequenceNumber: aws.String("anything"), + }, + }, }, wantErr: false, expected: expected{ @@ -69,7 +75,12 @@ func TestKinesisConsumer_onMessage(t *testing.T) { records: make(map[telegraf.TrackingID]string), }, args: args{ - r: &consumer.Record{Data: notZippedBytes, SequenceNumber: aws.String("anything")}, + r: &consumer.Record{ + Record: types.Record{ + Data: notZippedBytes, + SequenceNumber: aws.String("anything"), + }, + }, }, wantErr: false, expected: expected{ @@ -85,7 +96,12 @@ func TestKinesisConsumer_onMessage(t *testing.T) { records: make(map[telegraf.TrackingID]string), }, args: args{ - r: &consumer.Record{Data: notZippedBytes, SequenceNumber: aws.String("anything")}, + r: &consumer.Record{ + Record: types.Record{ + Data: notZippedBytes, + SequenceNumber: aws.String("anything"), + }, + }, }, wantErr: false, expected: expected{ @@ -100,7 +116,12 @@ func TestKinesisConsumer_onMessage(t *testing.T) { records: make(map[telegraf.TrackingID]string), }, args: args{ - r: &consumer.Record{Data: notZippedBytes, SequenceNumber: aws.String("anything")}, + r: &consumer.Record{ + Record: types.Record{ + Data: notZippedBytes, + SequenceNumber: aws.String("anything"), + }, + }, }, wantErr: false, expected: expected{ @@ -116,7 +137,12 @@ func TestKinesisConsumer_onMessage(t *testing.T) { records: make(map[telegraf.TrackingID]string), }, args: args{ - r: &consumer.Record{Data: gzippedBytes, SequenceNumber: aws.String("anything")}, + r: &consumer.Record{ + Record: types.Record{ + Data: gzippedBytes, + 
SequenceNumber: aws.String("anything"), + }, + }, }, wantErr: false, expected: expected{ @@ -132,7 +158,12 @@ func TestKinesisConsumer_onMessage(t *testing.T) { records: make(map[telegraf.TrackingID]string), }, args: args{ - r: &consumer.Record{Data: zlibBytpes, SequenceNumber: aws.String("anything")}, + r: &consumer.Record{ + Record: types.Record{ + Data: zlibBytpes, + SequenceNumber: aws.String("anything"), + }, + }, }, wantErr: false, expected: expected{ diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go index 85f9570b3d5ea..129f014bfb548 100644 --- a/plugins/outputs/cloudwatch/cloudwatch.go +++ b/plugins/outputs/cloudwatch/cloudwatch.go @@ -1,13 +1,15 @@ package cloudwatch import ( + "context" "math" "sort" "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cloudwatch" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/cloudwatch" + "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" "github.com/influxdata/telegraf" internalaws "github.com/influxdata/telegraf/config/aws" @@ -17,7 +19,7 @@ import ( type CloudWatch struct { Namespace string `toml:"namespace"` // CloudWatch Metrics Namespace HighResolutionMetrics bool `toml:"high_resolution_metrics"` - svc *cloudwatch.CloudWatch + svc *cloudwatch.Client WriteStatistics bool `toml:"write_statistics"` @@ -38,7 +40,7 @@ const ( type cloudwatchField interface { addValue(sType statisticType, value float64) - buildDatum() []*cloudwatch.MetricDatum + buildDatum() []types.MetricDatum } type statisticField struct { @@ -56,8 +58,8 @@ func (f *statisticField) addValue(sType statisticType, value float64) { } } -func (f *statisticField) buildDatum() []*cloudwatch.MetricDatum { - var datums []*cloudwatch.MetricDatum +func (f *statisticField) buildDatum() []types.MetricDatum { + var datums []types.MetricDatum if f.hasAllFields() { // If we have all required fields, we build datum with StatisticValues @@ -66,24 +68,24 
@@ func (f *statisticField) buildDatum() []*cloudwatch.MetricDatum { sum := f.values[statisticTypeSum] count := f.values[statisticTypeCount] - datum := &cloudwatch.MetricDatum{ + datum := types.MetricDatum{ MetricName: aws.String(strings.Join([]string{f.metricName, f.fieldName}, "_")), Dimensions: BuildDimensions(f.tags), Timestamp: aws.Time(f.timestamp), - StatisticValues: &cloudwatch.StatisticSet{ + StatisticValues: &types.StatisticSet{ Minimum: aws.Float64(min), Maximum: aws.Float64(max), Sum: aws.Float64(sum), SampleCount: aws.Float64(count), }, - StorageResolution: aws.Int64(f.storageResolution), + StorageResolution: aws.Int32(int32(f.storageResolution)), } datums = append(datums, datum) } else { // If we don't have all required fields, we build each field as independent datum for sType, value := range f.values { - datum := &cloudwatch.MetricDatum{ + datum := types.MetricDatum{ Value: aws.Float64(value), Dimensions: BuildDimensions(f.tags), Timestamp: aws.Time(f.timestamp), @@ -134,14 +136,14 @@ func (f *valueField) addValue(sType statisticType, value float64) { } } -func (f *valueField) buildDatum() []*cloudwatch.MetricDatum { - return []*cloudwatch.MetricDatum{ +func (f *valueField) buildDatum() []types.MetricDatum { + return []types.MetricDatum{ { MetricName: aws.String(strings.Join([]string{f.metricName, f.fieldName}, "_")), Value: aws.Float64(f.value), Dimensions: BuildDimensions(f.tags), Timestamp: aws.Time(f.timestamp), - StorageResolution: aws.Int64(f.storageResolution), + StorageResolution: aws.Int32(int32(f.storageResolution)), }, } } @@ -198,11 +200,12 @@ func (c *CloudWatch) Description() string { } func (c *CloudWatch) Connect() error { - p, err := c.CredentialConfig.Credentials() + cfg, err := c.CredentialConfig.Credentials() if err != nil { return err } - c.svc = cloudwatch.New(p) + + c.svc = cloudwatch.NewFromConfig(cfg) return nil } @@ -211,7 +214,7 @@ func (c *CloudWatch) Close() error { } func (c *CloudWatch) Write(metrics []telegraf.Metric) 
error { - var datums []*cloudwatch.MetricDatum + var datums []types.MetricDatum for _, m := range metrics { d := BuildMetricDatum(c.WriteStatistics, c.HighResolutionMetrics, m) datums = append(datums, d...) @@ -229,13 +232,13 @@ func (c *CloudWatch) Write(metrics []telegraf.Metric) error { return nil } -func (c *CloudWatch) WriteToCloudWatch(datums []*cloudwatch.MetricDatum) error { +func (c *CloudWatch) WriteToCloudWatch(datums []types.MetricDatum) error { params := &cloudwatch.PutMetricDataInput{ MetricData: datums, Namespace: aws.String(c.Namespace), } - _, err := c.svc.PutMetricData(params) + _, err := c.svc.PutMetricData(context.Background(), params) if err != nil { c.Log.Errorf("Unable to write to CloudWatch : %+v", err.Error()) @@ -246,13 +249,13 @@ func (c *CloudWatch) WriteToCloudWatch(datums []*cloudwatch.MetricDatum) error { // Partition the MetricDatums into smaller slices of a max size so that are under the limit // for the AWS API calls. -func PartitionDatums(size int, datums []*cloudwatch.MetricDatum) [][]*cloudwatch.MetricDatum { +func PartitionDatums(size int, datums []types.MetricDatum) [][]types.MetricDatum { numberOfPartitions := len(datums) / size if len(datums)%size != 0 { numberOfPartitions++ } - partitions := make([][]*cloudwatch.MetricDatum, numberOfPartitions) + partitions := make([][]types.MetricDatum, numberOfPartitions) for i := 0; i < numberOfPartitions; i++ { start := size * i @@ -270,7 +273,7 @@ func PartitionDatums(size int, datums []*cloudwatch.MetricDatum) [][]*cloudwatch // Make a MetricDatum from telegraf.Metric. It would check if all required fields of // cloudwatch.StatisticSet are available. If so, it would build MetricDatum from statistic values. // Otherwise, fields would still been built independently. 
-func BuildMetricDatum(buildStatistic bool, highResolutionMetrics bool, point telegraf.Metric) []*cloudwatch.MetricDatum { +func BuildMetricDatum(buildStatistic bool, highResolutionMetrics bool, point telegraf.Metric) []types.MetricDatum { fields := make(map[string]cloudwatchField) tags := point.Tags() storageResolution := int64(60) @@ -320,7 +323,7 @@ func BuildMetricDatum(buildStatistic bool, highResolutionMetrics bool, point tel } } - var datums []*cloudwatch.MetricDatum + var datums []types.MetricDatum for _, f := range fields { d := f.buildDatum() datums = append(datums, d...) @@ -332,13 +335,13 @@ func BuildMetricDatum(buildStatistic bool, highResolutionMetrics bool, point tel // Make a list of Dimensions by using a Point's tags. CloudWatch supports up to // 10 dimensions per metric so we only keep up to the first 10 alphabetically. // This always includes the "host" tag if it exists. -func BuildDimensions(mTags map[string]string) []*cloudwatch.Dimension { +func BuildDimensions(mTags map[string]string) []types.Dimension { const MaxDimensions = 10 - dimensions := make([]*cloudwatch.Dimension, 0, MaxDimensions) + dimensions := make([]types.Dimension, 0, MaxDimensions) // This is pretty ugly but we always want to include the "host" tag if it exists. 
if host, ok := mTags["host"]; ok { - dimensions = append(dimensions, &cloudwatch.Dimension{ + dimensions = append(dimensions, types.Dimension{ Name: aws.String("host"), Value: aws.String(host), }) @@ -362,7 +365,7 @@ func BuildDimensions(mTags map[string]string) []*cloudwatch.Dimension { continue } - dimensions = append(dimensions, &cloudwatch.Dimension{ + dimensions = append(dimensions, types.Dimension{ Name: aws.String(k), Value: aws.String(mTags[k]), }) diff --git a/plugins/outputs/cloudwatch/cloudwatch_test.go b/plugins/outputs/cloudwatch/cloudwatch_test.go index 95987f591830d..df98381cf3f90 100644 --- a/plugins/outputs/cloudwatch/cloudwatch_test.go +++ b/plugins/outputs/cloudwatch/cloudwatch_test.go @@ -2,14 +2,13 @@ package cloudwatch import ( "fmt" + "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" "math" "sort" "testing" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cloudwatch" - + "github.com/aws/aws-sdk-go-v2/aws" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" @@ -117,8 +116,8 @@ func TestBuildMetricDatums(t *testing.T) { } func TestMetricDatumResolution(t *testing.T) { - const expectedStandardResolutionValue = int64(60) - const expectedHighResolutionValue = int64(1) + const expectedStandardResolutionValue = int32(60) + const expectedHighResolutionValue = int32(1) assert := assert.New(t) @@ -153,19 +152,19 @@ func TestBuildMetricDatums_SkipEmptyTags(t *testing.T) { func TestPartitionDatums(t *testing.T) { assert := assert.New(t) - testDatum := cloudwatch.MetricDatum{ + testDatum := types.MetricDatum{ MetricName: aws.String("Foo"), Value: aws.Float64(1), } - zeroDatum := []*cloudwatch.MetricDatum{} - oneDatum := []*cloudwatch.MetricDatum{&testDatum} - twoDatum := []*cloudwatch.MetricDatum{&testDatum, &testDatum} - threeDatum := []*cloudwatch.MetricDatum{&testDatum, &testDatum, &testDatum} + zeroDatum := []types.MetricDatum{} + oneDatum := 
[]types.MetricDatum{testDatum} + twoDatum := []types.MetricDatum{testDatum, testDatum} + threeDatum := []types.MetricDatum{testDatum, testDatum, testDatum} - assert.Equal([][]*cloudwatch.MetricDatum{}, PartitionDatums(2, zeroDatum)) - assert.Equal([][]*cloudwatch.MetricDatum{oneDatum}, PartitionDatums(2, oneDatum)) - assert.Equal([][]*cloudwatch.MetricDatum{oneDatum}, PartitionDatums(2, oneDatum)) - assert.Equal([][]*cloudwatch.MetricDatum{twoDatum}, PartitionDatums(2, twoDatum)) - assert.Equal([][]*cloudwatch.MetricDatum{twoDatum, oneDatum}, PartitionDatums(2, threeDatum)) + assert.Equal([][]types.MetricDatum{}, PartitionDatums(2, zeroDatum)) + assert.Equal([][]types.MetricDatum{oneDatum}, PartitionDatums(2, oneDatum)) + assert.Equal([][]types.MetricDatum{oneDatum}, PartitionDatums(2, oneDatum)) + assert.Equal([][]types.MetricDatum{twoDatum}, PartitionDatums(2, twoDatum)) + assert.Equal([][]types.MetricDatum{twoDatum, oneDatum}, PartitionDatums(2, threeDatum)) } diff --git a/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go b/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go index f9ef289089363..952fea4b2a9a4 100644 --- a/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go +++ b/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go @@ -1,19 +1,21 @@ package cloudwatch_logs import ( + "context" "fmt" "sort" "strings" "time" - "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs" + "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs/types" "github.com/influxdata/telegraf" internalaws "github.com/influxdata/telegraf/config/aws" "github.com/influxdata/telegraf/plugins/outputs" ) type messageBatch struct { - logEvents []*cloudwatchlogs.InputLogEvent + logEvents []types.InputLogEvent messageCount int } type logStreamContainer struct { @@ -25,16 +27,16 @@ type logStreamContainer struct { //Cloudwatch Logs service interface type cloudWatchLogs interface { - DescribeLogGroups(*cloudwatchlogs.DescribeLogGroupsInput) 
(*cloudwatchlogs.DescribeLogGroupsOutput, error) - DescribeLogStreams(*cloudwatchlogs.DescribeLogStreamsInput) (*cloudwatchlogs.DescribeLogStreamsOutput, error) - CreateLogStream(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) - PutLogEvents(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) + DescribeLogGroups(context.Context, *cloudwatchlogs.DescribeLogGroupsInput, ...func(options *cloudwatchlogs.Options)) (*cloudwatchlogs.DescribeLogGroupsOutput, error) + DescribeLogStreams(context.Context, *cloudwatchlogs.DescribeLogStreamsInput, ...func(options *cloudwatchlogs.Options)) (*cloudwatchlogs.DescribeLogStreamsOutput, error) + CreateLogStream(context.Context, *cloudwatchlogs.CreateLogStreamInput, ...func(options *cloudwatchlogs.Options)) (*cloudwatchlogs.CreateLogStreamOutput, error) + PutLogEvents(context.Context, *cloudwatchlogs.PutLogEventsInput, ...func(options *cloudwatchlogs.Options)) (*cloudwatchlogs.PutLogEventsOutput, error) } // CloudWatchLogs plugin object definition type CloudWatchLogs struct { - LogGroup string `toml:"log_group"` - lg *cloudwatchlogs.LogGroup //log group data + LogGroup string `toml:"log_group"` + lg *types.LogGroup //log group data LogStream string `toml:"log_stream"` lsKey string //log stream source: tag or field @@ -187,19 +189,17 @@ func (c *CloudWatchLogs) Connect() error { var logGroupsOutput = &cloudwatchlogs.DescribeLogGroupsOutput{NextToken: &dummyToken} var err error - p, err := c.CredentialConfig.Credentials() + cfg, err := c.CredentialConfig.Credentials() if err != nil { return err } - c.svc = cloudwatchlogs.New(p) - if c.svc == nil { - return fmt.Errorf("can't create cloudwatch logs service endpoint") - } + c.svc = cloudwatchlogs.NewFromConfig(cfg) //Find log group with name 'c.LogGroup' if c.lg == nil { //In case connection is not retried, first time for logGroupsOutput.NextToken != nil { logGroupsOutput, err = c.svc.DescribeLogGroups( + context.Background(), 
&cloudwatchlogs.DescribeLogGroupsInput{ LogGroupNamePrefix: &c.LogGroup, NextToken: queryToken}) @@ -212,7 +212,7 @@ func (c *CloudWatchLogs) Connect() error { for _, logGroup := range logGroupsOutput.LogGroups { if *(logGroup.LogGroupName) == c.LogGroup { c.Log.Debugf("Found log group %q", c.LogGroup) - c.lg = logGroup + c.lg = &logGroup //nolint:revive } } } @@ -325,7 +325,7 @@ func (c *CloudWatchLogs) Write(metrics []telegraf.Metric) error { lsContainer = val } else { lsContainer.messageBatches[0].messageCount = 0 - lsContainer.messageBatches[0].logEvents = []*cloudwatchlogs.InputLogEvent{} + lsContainer.messageBatches[0].logEvents = []types.InputLogEvent{} c.ls[logStream] = lsContainer } @@ -335,7 +335,7 @@ func (c *CloudWatchLogs) Write(metrics []telegraf.Metric) error { lsContainer.currentBatchIndex++ lsContainer.messageBatches = append(lsContainer.messageBatches, messageBatch{ - logEvents: []*cloudwatchlogs.InputLogEvent{}, + logEvents: []types.InputLogEvent{}, messageCount: 0}) lsContainer.currentBatchSizeBytes = messageSizeInBytesForAWS } else { @@ -349,7 +349,7 @@ func (c *CloudWatchLogs) Write(metrics []telegraf.Metric) error { //Adding metring to batch lsContainer.messageBatches[lsContainer.currentBatchIndex].logEvents = append(lsContainer.messageBatches[lsContainer.currentBatchIndex].logEvents, - &cloudwatchlogs.InputLogEvent{ + types.InputLogEvent{ Message: &logData, Timestamp: &metricTime}) } @@ -370,11 +370,11 @@ func (c *CloudWatchLogs) Write(metrics []telegraf.Metric) error { if elem.sequenceToken == "" { //This is the first attempt to write to log stream, //need to check log stream existence and create it if necessary - describeLogStreamOutput, err := c.svc.DescribeLogStreams(&cloudwatchlogs.DescribeLogStreamsInput{ + describeLogStreamOutput, err := c.svc.DescribeLogStreams(context.Background(), &cloudwatchlogs.DescribeLogStreamsInput{ LogGroupName: &c.LogGroup, LogStreamNamePrefix: &logStream}) if err == nil && 
len(describeLogStreamOutput.LogStreams) == 0 { - _, err := c.svc.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{ + _, err := c.svc.CreateLogStream(context.Background(), &cloudwatchlogs.CreateLogStreamInput{ LogGroupName: &c.LogGroup, LogStreamName: &logStream}) if err != nil { @@ -404,14 +404,14 @@ func (c *CloudWatchLogs) Write(metrics []telegraf.Metric) error { //There is a quota of 5 requests per second per log stream. Additional //requests are throttled. This quota can't be changed. - putLogEventsOutput, err := c.svc.PutLogEvents(&putLogEvents) + putLogEventsOutput, err := c.svc.PutLogEvents(context.Background(), &putLogEvents) if err != nil { c.Log.Errorf("Can't push logs batch to AWS. Reason: %v", err) continue } //Cleanup batch elem.messageBatches[index] = messageBatch{ - logEvents: []*cloudwatchlogs.InputLogEvent{}, + logEvents: []types.InputLogEvent{}, messageCount: 0} elem.sequenceToken = *putLogEventsOutput.NextSequenceToken diff --git a/plugins/outputs/cloudwatch_logs/cloudwatch_logs_test.go b/plugins/outputs/cloudwatch_logs/cloudwatch_logs_test.go index f2f9f0cac7f3c..e103eb53d24e6 100644 --- a/plugins/outputs/cloudwatch_logs/cloudwatch_logs_test.go +++ b/plugins/outputs/cloudwatch_logs/cloudwatch_logs_test.go @@ -1,6 +1,7 @@ package cloudwatch_logs import ( + "context" "fmt" "math/rand" "net/http" @@ -8,7 +9,8 @@ import ( "testing" "time" - "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + cloudwatchlogsV2 "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs" + "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs/types" "github.com/influxdata/telegraf" internalaws "github.com/influxdata/telegraf/config/aws" "github.com/influxdata/telegraf/testutil" @@ -17,24 +19,24 @@ import ( type mockCloudWatchLogs struct { logStreamName string - pushedLogEvents []cloudwatchlogs.InputLogEvent + pushedLogEvents []types.InputLogEvent } func (c *mockCloudWatchLogs) Init(lsName string) { c.logStreamName = lsName - c.pushedLogEvents = 
make([]cloudwatchlogs.InputLogEvent, 0) + c.pushedLogEvents = make([]types.InputLogEvent, 0) } -func (c *mockCloudWatchLogs) DescribeLogGroups(*cloudwatchlogs.DescribeLogGroupsInput) (*cloudwatchlogs.DescribeLogGroupsOutput, error) { +func (c *mockCloudWatchLogs) DescribeLogGroups(context.Context, *cloudwatchlogsV2.DescribeLogGroupsInput, ...func(options *cloudwatchlogsV2.Options)) (*cloudwatchlogsV2.DescribeLogGroupsOutput, error) { return nil, nil } -func (c *mockCloudWatchLogs) DescribeLogStreams(*cloudwatchlogs.DescribeLogStreamsInput) (*cloudwatchlogs.DescribeLogStreamsOutput, error) { +func (c *mockCloudWatchLogs) DescribeLogStreams(context.Context, *cloudwatchlogsV2.DescribeLogStreamsInput, ...func(options *cloudwatchlogsV2.Options)) (*cloudwatchlogsV2.DescribeLogStreamsOutput, error) { arn := "arn" creationTime := time.Now().Unix() sequenceToken := "arbitraryToken" - output := &cloudwatchlogs.DescribeLogStreamsOutput{ - LogStreams: []*cloudwatchlogs.LogStream{ + output := &cloudwatchlogsV2.DescribeLogStreamsOutput{ + LogStreams: []types.LogStream{ { Arn: &arn, CreationTime: &creationTime, @@ -48,15 +50,15 @@ func (c *mockCloudWatchLogs) DescribeLogStreams(*cloudwatchlogs.DescribeLogStrea } return output, nil } -func (c *mockCloudWatchLogs) CreateLogStream(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) { +func (c *mockCloudWatchLogs) CreateLogStream(context.Context, *cloudwatchlogsV2.CreateLogStreamInput, ...func(options *cloudwatchlogsV2.Options)) (*cloudwatchlogsV2.CreateLogStreamOutput, error) { return nil, nil } -func (c *mockCloudWatchLogs) PutLogEvents(input *cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) { +func (c *mockCloudWatchLogs) PutLogEvents(_ context.Context, input *cloudwatchlogsV2.PutLogEventsInput, _ ...func(options *cloudwatchlogsV2.Options)) (*cloudwatchlogsV2.PutLogEventsOutput, error) { sequenceToken := "arbitraryToken" - output := 
&cloudwatchlogs.PutLogEventsOutput{NextSequenceToken: &sequenceToken} + output := &cloudwatchlogsV2.PutLogEventsOutput{NextSequenceToken: &sequenceToken} //Saving messages for _, event := range input.LogEvents { - c.pushedLogEvents = append(c.pushedLogEvents, *event) + c.pushedLogEvents = append(c.pushedLogEvents, event) } return output, nil diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go index 412e3d9742b72..56858340887f5 100644 --- a/plugins/outputs/kinesis/kinesis.go +++ b/plugins/outputs/kinesis/kinesis.go @@ -1,11 +1,12 @@ package kinesis import ( + "context" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kinesis" - "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kinesis" + "github.com/aws/aws-sdk-go-v2/service/kinesis/types" "github.com/gofrs/uuid" "github.com/influxdata/telegraf" internalaws "github.com/influxdata/telegraf/config/aws" @@ -26,7 +27,7 @@ type ( Log telegraf.Logger `toml:"-"` serializer serializers.Serializer - svc kinesisiface.KinesisAPI + svc kinesisClient internalaws.CredentialConfig } @@ -38,6 +39,10 @@ type ( } ) +type kinesisClient interface { + PutRecords(context.Context, *kinesis.PutRecordsInput, ...func(*kinesis.Options)) (*kinesis.PutRecordsOutput, error) +} + var sampleConfig = ` ## Amazon REGION of kinesis endpoint. 
region = "ap-southeast-2" @@ -126,13 +131,14 @@ func (k *KinesisOutput) Connect() error { k.Log.Infof("Establishing a connection to Kinesis in %s", k.Region) } - p, err := k.CredentialConfig.Credentials() + cfg, err := k.CredentialConfig.Credentials() if err != nil { return err } - svc := kinesis.New(p) - _, err = svc.DescribeStreamSummary(&kinesis.DescribeStreamSummaryInput{ + svc := kinesis.NewFromConfig(cfg) + + _, err = svc.DescribeStreamSummary(context.Background(), &kinesis.DescribeStreamSummaryInput{ StreamName: aws.String(k.StreamName), }) k.svc = svc @@ -147,14 +153,14 @@ func (k *KinesisOutput) SetSerializer(serializer serializers.Serializer) { k.serializer = serializer } -func (k *KinesisOutput) writeKinesis(r []*kinesis.PutRecordsRequestEntry) time.Duration { +func (k *KinesisOutput) writeKinesis(r []types.PutRecordsRequestEntry) time.Duration { start := time.Now() payload := &kinesis.PutRecordsInput{ Records: r, StreamName: aws.String(k.StreamName), } - resp, err := k.svc.PutRecords(payload) + resp, err := k.svc.PutRecords(context.Background(), payload) if err != nil { k.Log.Errorf("Unable to write to Kinesis : %s", err.Error()) return time.Since(start) @@ -214,7 +220,7 @@ func (k *KinesisOutput) Write(metrics []telegraf.Metric) error { return nil } - r := []*kinesis.PutRecordsRequestEntry{} + r := []types.PutRecordsRequestEntry{} for _, metric := range metrics { sz++ @@ -227,12 +233,12 @@ func (k *KinesisOutput) Write(metrics []telegraf.Metric) error { partitionKey := k.getPartitionKey(metric) - d := kinesis.PutRecordsRequestEntry{ + d := types.PutRecordsRequestEntry{ Data: values, PartitionKey: aws.String(partitionKey), } - r = append(r, &d) + r = append(r, d) if sz == maxRecordsPerRequest { elapsed := k.writeKinesis(r) diff --git a/plugins/outputs/kinesis/kinesis_test.go b/plugins/outputs/kinesis/kinesis_test.go index 22b8e83e48e24..89724ef1805d2 100644 --- a/plugins/outputs/kinesis/kinesis_test.go +++ b/plugins/outputs/kinesis/kinesis_test.go @@ 
-1,13 +1,13 @@ package kinesis import ( + "context" "fmt" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/kinesis" - "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kinesis" + "github.com/aws/aws-sdk-go-v2/service/kinesis/types" "github.com/gofrs/uuid" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/serializers" @@ -110,7 +110,7 @@ func TestPartitionKey(t *testing.T) { func TestWriteKinesis_WhenSuccess(t *testing.T) { assert := assert.New(t) - records := []*kinesis.PutRecordsRequestEntry{ + records := []types.PutRecordsRequestEntry{ { PartitionKey: aws.String(testPartitionKey), Data: []byte{0x65}, @@ -120,7 +120,7 @@ func TestWriteKinesis_WhenSuccess(t *testing.T) { svc := &mockKinesisPutRecords{} svc.SetupResponse( 0, - []*kinesis.PutRecordsResultEntry{ + []types.PutRecordsResultEntry{ { SequenceNumber: aws.String(testSequenceNumber), ShardId: aws.String(testShardID), @@ -148,7 +148,7 @@ func TestWriteKinesis_WhenSuccess(t *testing.T) { func TestWriteKinesis_WhenRecordErrors(t *testing.T) { assert := assert.New(t) - records := []*kinesis.PutRecordsRequestEntry{ + records := []types.PutRecordsRequestEntry{ { PartitionKey: aws.String(testPartitionKey), Data: []byte{0x66}, @@ -158,7 +158,7 @@ func TestWriteKinesis_WhenRecordErrors(t *testing.T) { svc := &mockKinesisPutRecords{} svc.SetupResponse( 1, - []*kinesis.PutRecordsResultEntry{ + []types.PutRecordsResultEntry{ { ErrorCode: aws.String("InternalFailure"), ErrorMessage: aws.String("Internal Service Failure"), @@ -186,7 +186,7 @@ func TestWriteKinesis_WhenRecordErrors(t *testing.T) { func TestWriteKinesis_WhenServiceError(t *testing.T) { assert := assert.New(t) - records := []*kinesis.PutRecordsRequestEntry{ + records := []types.PutRecordsRequestEntry{ { PartitionKey: aws.String(testPartitionKey), Data: []byte{}, @@ -195,7 +195,7 @@ 
func TestWriteKinesis_WhenServiceError(t *testing.T) { svc := &mockKinesisPutRecords{} svc.SetupErrorResponse( - awserr.New("InvalidArgumentException", "Invalid record", nil), + &types.InvalidArgumentException{Message: aws.String("Invalid record")}, ) k := KinesisOutput{ @@ -262,7 +262,7 @@ func TestWrite_SingleMetric(t *testing.T) { svc.AssertRequests(t, []*kinesis.PutRecordsInput{ { StreamName: aws.String(testStreamName), - Records: []*kinesis.PutRecordsRequestEntry{ + Records: []types.PutRecordsRequestEntry{ { PartitionKey: aws.String(testPartitionKey), Data: metricData, @@ -449,7 +449,7 @@ func TestWrite_SerializerError(t *testing.T) { svc.AssertRequests(t, []*kinesis.PutRecordsInput{ { StreamName: aws.String(testStreamName), - Records: []*kinesis.PutRecordsRequestEntry{ + Records: []types.PutRecordsRequestEntry{ { PartitionKey: aws.String(testPartitionKey), Data: metric1Data, @@ -469,20 +469,18 @@ type mockKinesisPutRecordsResponse struct { } type mockKinesisPutRecords struct { - kinesisiface.KinesisAPI - requests []*kinesis.PutRecordsInput responses []*mockKinesisPutRecordsResponse } func (m *mockKinesisPutRecords) SetupResponse( - failedRecordCount int64, - records []*kinesis.PutRecordsResultEntry, + failedRecordCount int32, + records []types.PutRecordsResultEntry, ) { m.responses = append(m.responses, &mockKinesisPutRecordsResponse{ Err: nil, Output: &kinesis.PutRecordsOutput{ - FailedRecordCount: aws.Int64(failedRecordCount), + FailedRecordCount: aws.Int32(failedRecordCount), Records: records, }, }) @@ -490,25 +488,25 @@ func (m *mockKinesisPutRecords) SetupResponse( func (m *mockKinesisPutRecords) SetupGenericResponse( successfulRecordCount uint32, - failedRecordCount uint32, + failedRecordCount int32, ) { - records := []*kinesis.PutRecordsResultEntry{} + records := []types.PutRecordsResultEntry{} for i := uint32(0); i < successfulRecordCount; i++ { - records = append(records, &kinesis.PutRecordsResultEntry{ + records = append(records, 
types.PutRecordsResultEntry{ SequenceNumber: aws.String(testSequenceNumber), ShardId: aws.String(testShardID), }) } - for i := uint32(0); i < failedRecordCount; i++ { - records = append(records, &kinesis.PutRecordsResultEntry{ + for i := int32(0); i < failedRecordCount; i++ { + records = append(records, types.PutRecordsResultEntry{ ErrorCode: aws.String("InternalFailure"), ErrorMessage: aws.String("Internal Service Failure"), }) } - m.SetupResponse(int64(failedRecordCount), records) + m.SetupResponse(failedRecordCount, records) } func (m *mockKinesisPutRecords) SetupErrorResponse(err error) { @@ -518,7 +516,7 @@ func (m *mockKinesisPutRecords) SetupErrorResponse(err error) { }) } -func (m *mockKinesisPutRecords) PutRecords(input *kinesis.PutRecordsInput) (*kinesis.PutRecordsOutput, error) { +func (m *mockKinesisPutRecords) PutRecords(_ context.Context, input *kinesis.PutRecordsInput, _ ...func(*kinesis.Options)) (*kinesis.PutRecordsOutput, error) { reqNum := len(m.requests) if reqNum > len(m.responses) { return nil, fmt.Errorf("Response for request %+v not setup", reqNum) @@ -612,12 +610,12 @@ func createTestMetrics( func createPutRecordsRequestEntries( metricsData [][]byte, -) []*kinesis.PutRecordsRequestEntry { +) []types.PutRecordsRequestEntry { count := len(metricsData) - records := make([]*kinesis.PutRecordsRequestEntry, count) + records := make([]types.PutRecordsRequestEntry, count) for i := 0; i < count; i++ { - records[i] = &kinesis.PutRecordsRequestEntry{ + records[i] = types.PutRecordsRequestEntry{ PartitionKey: aws.String(testPartitionKey), Data: metricsData[i], } diff --git a/plugins/outputs/timestream/timestream.go b/plugins/outputs/timestream/timestream.go index 42ea706afa9e6..6478563b6b245 100644 --- a/plugins/outputs/timestream/timestream.go +++ b/plugins/outputs/timestream/timestream.go @@ -1,7 +1,9 @@ package timestream import ( + "context" "encoding/binary" + "errors" "fmt" "hash/fnv" "reflect" @@ -11,9 +13,10 @@ import ( 
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/timestreamwrite" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/timestreamwrite" + "github.com/aws/aws-sdk-go-v2/service/timestreamwrite/types" + "github.com/aws/smithy-go" internalaws "github.com/influxdata/telegraf/config/aws" ) @@ -38,9 +41,9 @@ type ( } WriteClient interface { - CreateTable(*timestreamwrite.CreateTableInput) (*timestreamwrite.CreateTableOutput, error) - WriteRecords(*timestreamwrite.WriteRecordsInput) (*timestreamwrite.WriteRecordsOutput, error) - DescribeDatabase(*timestreamwrite.DescribeDatabaseInput) (*timestreamwrite.DescribeDatabaseOutput, error) + CreateTable(context.Context, *timestreamwrite.CreateTableInput, ...func(*timestreamwrite.Options)) (*timestreamwrite.CreateTableOutput, error) + WriteRecords(context.Context, *timestreamwrite.WriteRecordsInput, ...func(*timestreamwrite.Options)) (*timestreamwrite.WriteRecordsOutput, error) + DescribeDatabase(context.Context, *timestreamwrite.DescribeDatabaseInput, ...func(*timestreamwrite.Options)) (*timestreamwrite.DescribeDatabaseOutput, error) } ) @@ -170,11 +173,11 @@ var sampleConfig = ` // WriteFactory function provides a way to mock the client instantiation for testing purposes. 
var WriteFactory = func(credentialConfig *internalaws.CredentialConfig) (WriteClient, error) { - configProvider, err := credentialConfig.Credentials() + cfg, err := credentialConfig.Credentials() if err != nil { - return nil, err + return ×treamwrite.Client{}, err } - return timestreamwrite.New(configProvider), nil + return timestreamwrite.NewFromConfig(cfg), nil } func (t *Timestream) Connect() error { @@ -235,7 +238,7 @@ func (t *Timestream) Connect() error { describeDatabaseInput := ×treamwrite.DescribeDatabaseInput{ DatabaseName: aws.String(t.DatabaseName), } - describeDatabaseOutput, err := svc.DescribeDatabase(describeDatabaseInput) + describeDatabaseOutput, err := svc.DescribeDatabase(context.Background(), describeDatabaseInput) if err != nil { t.Log.Errorf("Couldn't describe database '%s'. Check error, fix permissions, connectivity, create database.", t.DatabaseName) return err @@ -278,33 +281,45 @@ func (t *Timestream) Write(metrics []telegraf.Metric) error { func (t *Timestream) writeToTimestream(writeRecordsInput *timestreamwrite.WriteRecordsInput, resourceNotFoundRetry bool) error { t.Log.Debugf("Writing to Timestream: '%v' with ResourceNotFoundRetry: '%t'", writeRecordsInput, resourceNotFoundRetry) - _, err := t.svc.WriteRecords(writeRecordsInput) + _, err := t.svc.WriteRecords(context.Background(), writeRecordsInput) if err != nil { // Telegraf will retry ingesting the metrics if an error is returned from the plugin. // Therefore, return error only for retryable exceptions: ThrottlingException and 5xx exceptions. - if e, ok := err.(awserr.Error); ok { - switch e.Code() { - case timestreamwrite.ErrCodeResourceNotFoundException: - if resourceNotFoundRetry { - t.Log.Warnf("Failed to write to Timestream database '%s' table '%s'. 
Error: '%s'", - t.DatabaseName, *writeRecordsInput.TableName, e) - return t.createTableAndRetry(writeRecordsInput) - } - t.logWriteToTimestreamError(err, writeRecordsInput.TableName) - case timestreamwrite.ErrCodeThrottlingException: - return fmt.Errorf("unable to write to Timestream database '%s' table '%s'. Error: %s", - t.DatabaseName, *writeRecordsInput.TableName, err) - case timestreamwrite.ErrCodeInternalServerException: - return fmt.Errorf("unable to write to Timestream database '%s' table '%s'. Error: %s", - t.DatabaseName, *writeRecordsInput.TableName, err) - default: - t.logWriteToTimestreamError(err, writeRecordsInput.TableName) + var notFound *types.ResourceNotFoundException + if errors.As(err, ¬Found) { + if resourceNotFoundRetry { + t.Log.Warnf("Failed to write to Timestream database '%s' table '%s'. Error: '%s'", + t.DatabaseName, *writeRecordsInput.TableName, notFound) + return t.createTableAndRetry(writeRecordsInput) } - } else { + t.logWriteToTimestreamError(notFound, writeRecordsInput.TableName) + } + + var rejected *types.RejectedRecordsException + if errors.As(err, &rejected) { + t.logWriteToTimestreamError(err, writeRecordsInput.TableName) + return nil + } + + var throttling *types.ThrottlingException + if errors.As(err, &throttling) { + return fmt.Errorf("unable to write to Timestream database '%s' table '%s'. Error: %s", + t.DatabaseName, *writeRecordsInput.TableName, throttling) + } + + var internal *types.InternalServerException + if errors.As(err, &internal) { + return fmt.Errorf("unable to write to Timestream database '%s' table '%s'. Error: %s", + t.DatabaseName, *writeRecordsInput.TableName, internal) + } + + var operation *smithy.OperationError + if !errors.As(err, &operation) { // Retry other, non-aws errors. return fmt.Errorf("unable to write to Timestream database '%s' table '%s'. 
Error: %s", t.DatabaseName, *writeRecordsInput.TableName, err) } + t.logWriteToTimestreamError(err, writeRecordsInput.TableName) } return nil } @@ -334,27 +349,25 @@ func (t *Timestream) createTable(tableName *string) error { createTableInput := ×treamwrite.CreateTableInput{ DatabaseName: aws.String(t.DatabaseName), TableName: aws.String(*tableName), - RetentionProperties: ×treamwrite.RetentionProperties{ - MagneticStoreRetentionPeriodInDays: aws.Int64(t.CreateTableMagneticStoreRetentionPeriodInDays), - MemoryStoreRetentionPeriodInHours: aws.Int64(t.CreateTableMemoryStoreRetentionPeriodInHours), + RetentionProperties: &types.RetentionProperties{ + MagneticStoreRetentionPeriodInDays: t.CreateTableMagneticStoreRetentionPeriodInDays, + MemoryStoreRetentionPeriodInHours: t.CreateTableMemoryStoreRetentionPeriodInHours, }, } - var tags []*timestreamwrite.Tag + var tags []types.Tag for key, val := range t.CreateTableTags { - tags = append(tags, ×treamwrite.Tag{ + tags = append(tags, types.Tag{ Key: aws.String(key), Value: aws.String(val), }) } - createTableInput.SetTags(tags) + createTableInput.Tags = tags - _, err := t.svc.CreateTable(createTableInput) + _, err := t.svc.CreateTable(context.Background(), createTableInput) if err != nil { - if e, ok := err.(awserr.Error); ok { + if _, ok := err.(*types.ConflictException); ok { // if the table was created in the meantime, it's ok. 
- if e.Code() == timestreamwrite.ErrCodeConflictException { - return nil - } + return nil } return err } @@ -380,17 +393,17 @@ func (t *Timestream) TransformMetrics(metrics []telegraf.Metric) []*timestreamwr newWriteRecord := ×treamwrite.WriteRecordsInput{ DatabaseName: aws.String(t.DatabaseName), Records: records, - CommonAttributes: ×treamwrite.Record{ + CommonAttributes: &types.Record{ Dimensions: dimensions, Time: aws.String(timeValue), - TimeUnit: aws.String(timeUnit), + TimeUnit: timeUnit, }, } if t.MappingMode == MappingModeSingleTable { - newWriteRecord.SetTableName(t.SingleTableName) + newWriteRecord.TableName = &t.SingleTableName } if t.MappingMode == MappingModeMultiTable { - newWriteRecord.SetTableName(m.Name()) + newWriteRecord.TableName = aws.String(m.Name()) } writeRequests[id] = newWriteRecord @@ -440,17 +453,17 @@ func hashFromMetricTimeNameTagKeys(m telegraf.Metric) uint64 { return h.Sum64() } -func (t *Timestream) buildDimensions(point telegraf.Metric) []*timestreamwrite.Dimension { - var dimensions []*timestreamwrite.Dimension +func (t *Timestream) buildDimensions(point telegraf.Metric) []types.Dimension { + var dimensions []types.Dimension for tagName, tagValue := range point.Tags() { - dimension := ×treamwrite.Dimension{ + dimension := types.Dimension{ Name: aws.String(tagName), Value: aws.String(tagValue), } dimensions = append(dimensions, dimension) } if t.MappingMode == MappingModeSingleTable { - dimension := ×treamwrite.Dimension{ + dimension := types.Dimension{ Name: aws.String(t.SingleTableDimensionNameForTelegrafMeasurementName), Value: aws.String(point.Name()), } @@ -463,8 +476,8 @@ func (t *Timestream) buildDimensions(point telegraf.Metric) []*timestreamwrite.D // Tags and time are not included - common attributes are built separately. // Records with unsupported Metric Field type are skipped. // It returns an array of Timestream write records. 
-func (t *Timestream) buildWriteRecords(point telegraf.Metric) []*timestreamwrite.Record { - var records []*timestreamwrite.Record +func (t *Timestream) buildWriteRecords(point telegraf.Metric) []types.Record { + var records []types.Record for fieldName, fieldValue := range point.Fields() { stringFieldValue, stringFieldValueType, ok := convertValue(fieldValue) if !ok { @@ -473,9 +486,9 @@ func (t *Timestream) buildWriteRecords(point telegraf.Metric) []*timestreamwrite fieldName, reflect.TypeOf(fieldValue)) continue } - record := ×treamwrite.Record{ + record := types.Record{ MeasureName: aws.String(fieldName), - MeasureValueType: aws.String(stringFieldValueType), + MeasureValueType: stringFieldValueType, MeasureValue: aws.String(stringFieldValue), } records = append(records, record) @@ -486,13 +499,13 @@ func (t *Timestream) buildWriteRecords(point telegraf.Metric) []*timestreamwrite // partitionRecords splits the Timestream records into smaller slices of a max size // so that are under the limit for the Timestream API call. // It returns the array of array of records. -func partitionRecords(size int, records []*timestreamwrite.Record) [][]*timestreamwrite.Record { +func partitionRecords(size int, records []types.Record) [][]types.Record { numberOfPartitions := len(records) / size if len(records)%size != 0 { numberOfPartitions++ } - partitions := make([][]*timestreamwrite.Record, numberOfPartitions) + partitions := make([][]types.Record, numberOfPartitions) for i := 0; i < numberOfPartitions; i++ { start := size * i @@ -509,25 +522,19 @@ func partitionRecords(size int, records []*timestreamwrite.Record) [][]*timestre // getTimestreamTime produces Timestream TimeUnit and TimeValue with minimum possible granularity // while maintaining the same information. 
-func getTimestreamTime(time time.Time) (timeUnit string, timeValue string) { - const ( - TimeUnitS = "SECONDS" - TimeUnitMS = "MILLISECONDS" - TimeUnitUS = "MICROSECONDS" - TimeUnitNS = "NANOSECONDS" - ) - nanosTime := time.UnixNano() +func getTimestreamTime(t time.Time) (timeUnit types.TimeUnit, timeValue string) { + nanosTime := t.UnixNano() if nanosTime%1e9 == 0 { - timeUnit = TimeUnitS + timeUnit = types.TimeUnitSeconds timeValue = strconv.FormatInt(nanosTime/1e9, 10) } else if nanosTime%1e6 == 0 { - timeUnit = TimeUnitMS + timeUnit = types.TimeUnitMilliseconds timeValue = strconv.FormatInt(nanosTime/1e6, 10) } else if nanosTime%1e3 == 0 { - timeUnit = TimeUnitUS + timeUnit = types.TimeUnitMicroseconds timeValue = strconv.FormatInt(nanosTime/1e3, 10) } else { - timeUnit = TimeUnitNS + timeUnit = types.TimeUnitNanoseconds timeValue = strconv.FormatInt(nanosTime, 10) } return @@ -535,61 +542,55 @@ func getTimestreamTime(time time.Time) (timeUnit string, timeValue string) { // convertValue converts single Field value from Telegraf Metric and produces // value, valueType Timestream representation. 
-func convertValue(v interface{}) (value string, valueType string, ok bool) { - const ( - TypeBigInt = "BIGINT" - TypeDouble = "DOUBLE" - TypeBoolean = "BOOLEAN" - TypeVarchar = "VARCHAR" - ) +func convertValue(v interface{}) (value string, valueType types.MeasureValueType, ok bool) { ok = true switch t := v.(type) { case int: - valueType = TypeBigInt + valueType = types.MeasureValueTypeBigint value = strconv.FormatInt(int64(t), 10) case int8: - valueType = TypeBigInt + valueType = types.MeasureValueTypeBigint value = strconv.FormatInt(int64(t), 10) case int16: - valueType = TypeBigInt + valueType = types.MeasureValueTypeBigint value = strconv.FormatInt(int64(t), 10) case int32: - valueType = TypeBigInt + valueType = types.MeasureValueTypeBigint value = strconv.FormatInt(int64(t), 10) case int64: - valueType = TypeBigInt + valueType = types.MeasureValueTypeBigint value = strconv.FormatInt(t, 10) case uint: - valueType = TypeBigInt + valueType = types.MeasureValueTypeBigint value = strconv.FormatUint(uint64(t), 10) case uint8: - valueType = TypeBigInt + valueType = types.MeasureValueTypeBigint value = strconv.FormatUint(uint64(t), 10) case uint16: - valueType = TypeBigInt + valueType = types.MeasureValueTypeBigint value = strconv.FormatUint(uint64(t), 10) case uint32: - valueType = TypeBigInt + valueType = types.MeasureValueTypeBigint value = strconv.FormatUint(uint64(t), 10) case uint64: - valueType = TypeBigInt + valueType = types.MeasureValueTypeBigint value = strconv.FormatUint(t, 10) case float32: - valueType = TypeDouble + valueType = types.MeasureValueTypeDouble value = strconv.FormatFloat(float64(t), 'f', -1, 32) case float64: - valueType = TypeDouble + valueType = types.MeasureValueTypeDouble value = strconv.FormatFloat(t, 'f', -1, 64) case bool: - valueType = TypeBoolean + valueType = types.MeasureValueTypeBoolean if t { value = "true" } else { value = "false" } case string: - valueType = TypeVarchar + valueType = types.MeasureValueTypeVarchar value = t 
default: // Skip unsupported type. diff --git a/plugins/outputs/timestream/timestream_internal_test.go b/plugins/outputs/timestream/timestream_internal_test.go index 1f1194d707b69..d151c10d4b146 100644 --- a/plugins/outputs/timestream/timestream_internal_test.go +++ b/plugins/outputs/timestream/timestream_internal_test.go @@ -4,8 +4,8 @@ import ( "testing" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/timestreamwrite" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/timestreamwrite/types" "github.com/stretchr/testify/assert" ) @@ -19,57 +19,57 @@ func TestGetTimestreamTime(t *testing.T) { tOnlySeconds := time.Date(2020, time.November, 10, 23, 44, 20, 0, time.UTC) tUnitNanos, tValueNanos := getTimestreamTime(tWithNanos) - assertions.Equal("NANOSECONDS", tUnitNanos) + assertions.Equal(types.TimeUnitNanoseconds, tUnitNanos) assertions.Equal("1605051860000000123", tValueNanos) tUnitMicros, tValueMicros := getTimestreamTime(tWithMicros) - assertions.Equal("MICROSECONDS", tUnitMicros) + assertions.Equal(types.TimeUnitMicroseconds, tUnitMicros) assertions.Equal("1605051860000123", tValueMicros) tUnitMillis, tValueMillis := getTimestreamTime(tWithMillis) - assertions.Equal("MILLISECONDS", tUnitMillis) + assertions.Equal(types.TimeUnitMilliseconds, tUnitMillis) assertions.Equal("1605051860123", tValueMillis) tUnitSeconds, tValueSeconds := getTimestreamTime(tOnlySeconds) - assertions.Equal("SECONDS", tUnitSeconds) + assertions.Equal(types.TimeUnitSeconds, tUnitSeconds) assertions.Equal("1605051860", tValueSeconds) } func TestPartitionRecords(t *testing.T) { assertions := assert.New(t) - testDatum := timestreamwrite.Record{ + testDatum := types.Record{ MeasureName: aws.String("Foo"), - MeasureValueType: aws.String("DOUBLE"), + MeasureValueType: types.MeasureValueTypeDouble, MeasureValue: aws.String("123"), } - var zeroDatum []*timestreamwrite.Record - oneDatum := []*timestreamwrite.Record{&testDatum} - twoDatum := 
[]*timestreamwrite.Record{&testDatum, &testDatum} - threeDatum := []*timestreamwrite.Record{&testDatum, &testDatum, &testDatum} + var zeroDatum []types.Record + oneDatum := []types.Record{testDatum} + twoDatum := []types.Record{testDatum, testDatum} + threeDatum := []types.Record{testDatum, testDatum, testDatum} - assertions.Equal([][]*timestreamwrite.Record{}, partitionRecords(2, zeroDatum)) - assertions.Equal([][]*timestreamwrite.Record{oneDatum}, partitionRecords(2, oneDatum)) - assertions.Equal([][]*timestreamwrite.Record{oneDatum}, partitionRecords(2, oneDatum)) - assertions.Equal([][]*timestreamwrite.Record{twoDatum}, partitionRecords(2, twoDatum)) - assertions.Equal([][]*timestreamwrite.Record{twoDatum, oneDatum}, partitionRecords(2, threeDatum)) + assertions.Equal([][]types.Record{}, partitionRecords(2, zeroDatum)) + assertions.Equal([][]types.Record{oneDatum}, partitionRecords(2, oneDatum)) + assertions.Equal([][]types.Record{oneDatum}, partitionRecords(2, oneDatum)) + assertions.Equal([][]types.Record{twoDatum}, partitionRecords(2, twoDatum)) + assertions.Equal([][]types.Record{twoDatum, oneDatum}, partitionRecords(2, threeDatum)) } func TestConvertValueSupported(t *testing.T) { intInputValues := []interface{}{-1, int8(-2), int16(-3), int32(-4), int64(-5)} intOutputValues := []string{"-1", "-2", "-3", "-4", "-5"} - intOutputValueTypes := []string{"BIGINT", "BIGINT", "BIGINT", "BIGINT", "BIGINT"} + intOutputValueTypes := []types.MeasureValueType{types.MeasureValueTypeBigint, types.MeasureValueTypeBigint, types.MeasureValueTypeBigint, types.MeasureValueTypeBigint, types.MeasureValueTypeBigint} testConvertValueSupportedCases(t, intInputValues, intOutputValues, intOutputValueTypes) uintInputValues := []interface{}{uint(1), uint8(2), uint16(3), uint32(4), uint64(5)} uintOutputValues := []string{"1", "2", "3", "4", "5"} - uintOutputValueTypes := []string{"BIGINT", "BIGINT", "BIGINT", "BIGINT", "BIGINT"} + uintOutputValueTypes := 
[]types.MeasureValueType{types.MeasureValueTypeBigint, types.MeasureValueTypeBigint, types.MeasureValueTypeBigint, types.MeasureValueTypeBigint, types.MeasureValueTypeBigint} testConvertValueSupportedCases(t, uintInputValues, uintOutputValues, uintOutputValueTypes) otherInputValues := []interface{}{"foo", float32(22.123), 22.1234, true} otherOutputValues := []string{"foo", "22.123", "22.1234", "true"} - otherOutputValueTypes := []string{"VARCHAR", "DOUBLE", "DOUBLE", "BOOLEAN"} + otherOutputValueTypes := []types.MeasureValueType{types.MeasureValueTypeVarchar, types.MeasureValueTypeDouble, types.MeasureValueTypeDouble, types.MeasureValueTypeBoolean} testConvertValueSupportedCases(t, otherInputValues, otherOutputValues, otherOutputValueTypes) } @@ -80,7 +80,7 @@ func TestConvertValueUnsupported(t *testing.T) { } func testConvertValueSupportedCases(t *testing.T, - inputValues []interface{}, outputValues []string, outputValueTypes []string) { + inputValues []interface{}, outputValues []string, outputValueTypes []types.MeasureValueType) { assertions := assert.New(t) for i, inputValue := range inputValues { v, vt, ok := convertValue(inputValue) diff --git a/plugins/outputs/timestream/timestream_test.go b/plugins/outputs/timestream/timestream_test.go index 868e24d745a9c..be61a06a15358 100644 --- a/plugins/outputs/timestream/timestream_test.go +++ b/plugins/outputs/timestream/timestream_test.go @@ -1,6 +1,7 @@ -package timestream_test +package timestream import ( + "context" "fmt" "reflect" "sort" @@ -9,13 +10,11 @@ import ( "testing" "time" - "github.com/aws/aws-sdk-go/aws/awserr" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/timestreamwrite" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/timestreamwrite" + "github.com/aws/aws-sdk-go-v2/service/timestreamwrite/types" "github.com/influxdata/telegraf" internalaws "github.com/influxdata/telegraf/config/aws" - ts 
"github.com/influxdata/telegraf/plugins/outputs/timestream" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" @@ -34,40 +33,37 @@ var time2 = time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) const time2Epoch = "1257894000" -const timeUnit = "SECONDS" - const metricName1 = "metricName1" const metricName2 = "metricName2" -type mockTimestreamClient struct { -} +type mockTimestreamClient struct{} -func (m *mockTimestreamClient) CreateTable(*timestreamwrite.CreateTableInput) (*timestreamwrite.CreateTableOutput, error) { +func (m *mockTimestreamClient) CreateTable(context.Context, *timestreamwrite.CreateTableInput, ...func(*timestreamwrite.Options)) (*timestreamwrite.CreateTableOutput, error) { return nil, nil } -func (m *mockTimestreamClient) WriteRecords(*timestreamwrite.WriteRecordsInput) (*timestreamwrite.WriteRecordsOutput, error) { +func (m *mockTimestreamClient) WriteRecords(context.Context, *timestreamwrite.WriteRecordsInput, ...func(*timestreamwrite.Options)) (*timestreamwrite.WriteRecordsOutput, error) { return nil, nil } -func (m *mockTimestreamClient) DescribeDatabase(*timestreamwrite.DescribeDatabaseInput) (*timestreamwrite.DescribeDatabaseOutput, error) { +func (m *mockTimestreamClient) DescribeDatabase(context.Context, *timestreamwrite.DescribeDatabaseInput, ...func(*timestreamwrite.Options)) (*timestreamwrite.DescribeDatabaseOutput, error) { return nil, fmt.Errorf("hello from DescribeDatabase") } func TestConnectValidatesConfigParameters(t *testing.T) { assertions := assert.New(t) - ts.WriteFactory = func(credentialConfig *internalaws.CredentialConfig) (ts.WriteClient, error) { + WriteFactory = func(credentialConfig *internalaws.CredentialConfig) (WriteClient, error) { return &mockTimestreamClient{}, nil } // checking base arguments - noDatabaseName := ts.Timestream{Log: testutil.Logger{}} + noDatabaseName := Timestream{Log: testutil.Logger{}} assertions.Contains(noDatabaseName.Connect().Error(), "DatabaseName") - 
noMappingMode := ts.Timestream{ + noMappingMode := Timestream{ DatabaseName: tsDbName, Log: testutil.Logger{}, } assertions.Contains(noMappingMode.Connect().Error(), "MappingMode") - incorrectMappingMode := ts.Timestream{ + incorrectMappingMode := Timestream{ DatabaseName: tsDbName, MappingMode: "foo", Log: testutil.Logger{}, @@ -75,24 +71,24 @@ func TestConnectValidatesConfigParameters(t *testing.T) { assertions.Contains(incorrectMappingMode.Connect().Error(), "single-table") // multi-table arguments - validMappingModeMultiTable := ts.Timestream{ + validMappingModeMultiTable := Timestream{ DatabaseName: tsDbName, - MappingMode: ts.MappingModeMultiTable, + MappingMode: MappingModeMultiTable, Log: testutil.Logger{}, } assertions.Nil(validMappingModeMultiTable.Connect()) - singleTableNameWithMultiTable := ts.Timestream{ + singleTableNameWithMultiTable := Timestream{ DatabaseName: tsDbName, - MappingMode: ts.MappingModeMultiTable, + MappingMode: MappingModeMultiTable, SingleTableName: testSingleTableName, Log: testutil.Logger{}, } assertions.Contains(singleTableNameWithMultiTable.Connect().Error(), "SingleTableName") - singleTableDimensionWithMultiTable := ts.Timestream{ + singleTableDimensionWithMultiTable := Timestream{ DatabaseName: tsDbName, - MappingMode: ts.MappingModeMultiTable, + MappingMode: MappingModeMultiTable, SingleTableDimensionNameForTelegrafMeasurementName: testSingleTableDim, Log: testutil.Logger{}, } @@ -100,25 +96,25 @@ func TestConnectValidatesConfigParameters(t *testing.T) { "SingleTableDimensionNameForTelegrafMeasurementName") // single-table arguments - noTableNameMappingModeSingleTable := ts.Timestream{ + noTableNameMappingModeSingleTable := Timestream{ DatabaseName: tsDbName, - MappingMode: ts.MappingModeSingleTable, + MappingMode: MappingModeSingleTable, Log: testutil.Logger{}, } assertions.Contains(noTableNameMappingModeSingleTable.Connect().Error(), "SingleTableName") - noDimensionNameMappingModeSingleTable := ts.Timestream{ + 
noDimensionNameMappingModeSingleTable := Timestream{ DatabaseName: tsDbName, - MappingMode: ts.MappingModeSingleTable, + MappingMode: MappingModeSingleTable, SingleTableName: testSingleTableName, Log: testutil.Logger{}, } assertions.Contains(noDimensionNameMappingModeSingleTable.Connect().Error(), "SingleTableDimensionNameForTelegrafMeasurementName") - validConfigurationMappingModeSingleTable := ts.Timestream{ + validConfigurationMappingModeSingleTable := Timestream{ DatabaseName: tsDbName, - MappingMode: ts.MappingModeSingleTable, + MappingMode: MappingModeSingleTable, SingleTableName: testSingleTableName, SingleTableDimensionNameForTelegrafMeasurementName: testSingleTableDim, Log: testutil.Logger{}, @@ -126,18 +122,18 @@ func TestConnectValidatesConfigParameters(t *testing.T) { assertions.Nil(validConfigurationMappingModeSingleTable.Connect()) // create table arguments - createTableNoMagneticRetention := ts.Timestream{ + createTableNoMagneticRetention := Timestream{ DatabaseName: tsDbName, - MappingMode: ts.MappingModeMultiTable, + MappingMode: MappingModeMultiTable, CreateTableIfNotExists: true, Log: testutil.Logger{}, } assertions.Contains(createTableNoMagneticRetention.Connect().Error(), "CreateTableMagneticStoreRetentionPeriodInDays") - createTableNoMemoryRetention := ts.Timestream{ + createTableNoMemoryRetention := Timestream{ DatabaseName: tsDbName, - MappingMode: ts.MappingModeMultiTable, + MappingMode: MappingModeMultiTable, CreateTableIfNotExists: true, CreateTableMagneticStoreRetentionPeriodInDays: 3, Log: testutil.Logger{}, @@ -145,9 +141,9 @@ func TestConnectValidatesConfigParameters(t *testing.T) { assertions.Contains(createTableNoMemoryRetention.Connect().Error(), "CreateTableMemoryStoreRetentionPeriodInHours") - createTableValid := ts.Timestream{ + createTableValid := Timestream{ DatabaseName: tsDbName, - MappingMode: ts.MappingModeMultiTable, + MappingMode: MappingModeMultiTable, CreateTableIfNotExists: true, 
CreateTableMagneticStoreRetentionPeriodInDays: 3, CreateTableMemoryStoreRetentionPeriodInHours: 3, @@ -156,9 +152,9 @@ func TestConnectValidatesConfigParameters(t *testing.T) { assertions.Nil(createTableValid.Connect()) // describe table on start arguments - describeTableInvoked := ts.Timestream{ + describeTableInvoked := Timestream{ DatabaseName: tsDbName, - MappingMode: ts.MappingModeMultiTable, + MappingMode: MappingModeMultiTable, DescribeDatabaseOnStart: true, Log: testutil.Logger{}, } @@ -169,31 +165,30 @@ type mockTimestreamErrorClient struct { ErrorToReturnOnWriteRecords error } -func (m *mockTimestreamErrorClient) CreateTable(*timestreamwrite.CreateTableInput) (*timestreamwrite.CreateTableOutput, error) { +func (m *mockTimestreamErrorClient) CreateTable(context.Context, *timestreamwrite.CreateTableInput, ...func(*timestreamwrite.Options)) (*timestreamwrite.CreateTableOutput, error) { return nil, nil } -func (m *mockTimestreamErrorClient) WriteRecords(*timestreamwrite.WriteRecordsInput) (*timestreamwrite.WriteRecordsOutput, error) { +func (m *mockTimestreamErrorClient) WriteRecords(context.Context, *timestreamwrite.WriteRecordsInput, ...func(*timestreamwrite.Options)) (*timestreamwrite.WriteRecordsOutput, error) { return nil, m.ErrorToReturnOnWriteRecords } -func (m *mockTimestreamErrorClient) DescribeDatabase(*timestreamwrite.DescribeDatabaseInput) (*timestreamwrite.DescribeDatabaseOutput, error) { +func (m *mockTimestreamErrorClient) DescribeDatabase(context.Context, *timestreamwrite.DescribeDatabaseInput, ...func(*timestreamwrite.Options)) (*timestreamwrite.DescribeDatabaseOutput, error) { return nil, nil } func TestThrottlingErrorIsReturnedToTelegraf(t *testing.T) { assertions := assert.New(t) - - ts.WriteFactory = func(credentialConfig *internalaws.CredentialConfig) (ts.WriteClient, error) { + WriteFactory = func(credentialConfig *internalaws.CredentialConfig) (WriteClient, error) { return &mockTimestreamErrorClient{ - 
awserr.New(timestreamwrite.ErrCodeThrottlingException, - "Throttling Test", nil), + ErrorToReturnOnWriteRecords: &types.ThrottlingException{Message: aws.String("Throttling Test")}, }, nil } - plugin := ts.Timestream{ - MappingMode: ts.MappingModeMultiTable, + + plugin := Timestream{ + MappingMode: MappingModeMultiTable, DatabaseName: tsDbName, Log: testutil.Logger{}, } - plugin.Connect() + assertions.NoError(plugin.Connect()) input := testutil.MustMetric( metricName1, map[string]string{"tag1": "value1"}, @@ -209,19 +204,18 @@ func TestThrottlingErrorIsReturnedToTelegraf(t *testing.T) { func TestRejectedRecordsErrorResultsInMetricsBeingSkipped(t *testing.T) { assertions := assert.New(t) - - ts.WriteFactory = func(credentialConfig *internalaws.CredentialConfig) (ts.WriteClient, error) { + WriteFactory = func(credentialConfig *internalaws.CredentialConfig) (WriteClient, error) { return &mockTimestreamErrorClient{ - awserr.New(timestreamwrite.ErrCodeRejectedRecordsException, - "RejectedRecords Test", nil), + ErrorToReturnOnWriteRecords: &types.RejectedRecordsException{Message: aws.String("RejectedRecords Test")}, }, nil } - plugin := ts.Timestream{ - MappingMode: ts.MappingModeMultiTable, + + plugin := Timestream{ + MappingMode: MappingModeMultiTable, DatabaseName: tsDbName, Log: testutil.Logger{}, } - plugin.Connect() + assertions.NoError(plugin.Connect()) input := testutil.MustMetric( metricName1, map[string]string{"tag1": "value1"}, @@ -271,7 +265,7 @@ func TestTransformMetricsSkipEmptyMetric(t *testing.T) { dimensions: map[string]string{testSingleTableDim: metricName1}, measureValues: map[string]string{"value": "20"}, }) - comparisonTest(t, ts.MappingModeSingleTable, + comparisonTest(t, MappingModeSingleTable, []telegraf.Metric{input1, input2, input3}, []*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable}) @@ -287,7 +281,7 @@ func TestTransformMetricsSkipEmptyMetric(t *testing.T) { dimensions: map[string]string{}, 
measureValues: map[string]string{"value": "20"}, }) - comparisonTest(t, ts.MappingModeMultiTable, + comparisonTest(t, MappingModeMultiTable, []telegraf.Metric{input1, input2, input3}, []*timestreamwrite.WriteRecordsInput{expectedResult1MultiTable, expectedResult2MultiTable}) } @@ -326,7 +320,7 @@ func TestTransformMetricsRequestsAboveLimitAreSplit(t *testing.T) { dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, measureValues: map[string]string{"value_supported" + strconv.Itoa(maxRecordsInWriteRecordsCall+1): "10"}, }) - comparisonTest(t, ts.MappingModeSingleTable, + comparisonTest(t, MappingModeSingleTable, inputs, []*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable}) @@ -342,7 +336,7 @@ func TestTransformMetricsRequestsAboveLimitAreSplit(t *testing.T) { dimensions: map[string]string{"tag1": "value1"}, measureValues: map[string]string{"value_supported" + strconv.Itoa(maxRecordsInWriteRecordsCall+1): "10"}, }) - comparisonTest(t, ts.MappingModeMultiTable, + comparisonTest(t, MappingModeMultiTable, inputs, []*timestreamwrite.WriteRecordsInput{expectedResult1MultiTable, expectedResult2MultiTable}) } @@ -378,7 +372,7 @@ func TestTransformMetricsDifferentDimensionsSameTimestampsAreWrittenSeparate(t * measureValues: map[string]string{"value_supported3": "30"}, }) - comparisonTest(t, ts.MappingModeSingleTable, + comparisonTest(t, MappingModeSingleTable, []telegraf.Metric{input1, input2}, []*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable}) @@ -395,7 +389,7 @@ func TestTransformMetricsDifferentDimensionsSameTimestampsAreWrittenSeparate(t * measureValues: map[string]string{"value_supported3": "30"}, }) - comparisonTest(t, ts.MappingModeMultiTable, + comparisonTest(t, MappingModeMultiTable, []telegraf.Metric{input1, input2}, []*timestreamwrite.WriteRecordsInput{expectedResult1MultiTable, expectedResult2MultiTable}) } @@ -431,7 +425,7 @@ func 
TestTransformMetricsSameDimensionsDifferentDimensionValuesAreWrittenSeparat measureValues: map[string]string{"value_supported1": "20"}, }) - comparisonTest(t, ts.MappingModeSingleTable, + comparisonTest(t, MappingModeSingleTable, []telegraf.Metric{input1, input2}, []*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable}) @@ -448,7 +442,7 @@ func TestTransformMetricsSameDimensionsDifferentDimensionValuesAreWrittenSeparat measureValues: map[string]string{"value_supported1": "20"}, }) - comparisonTest(t, ts.MappingModeMultiTable, + comparisonTest(t, MappingModeMultiTable, []telegraf.Metric{input1, input2}, []*timestreamwrite.WriteRecordsInput{expectedResult1MultiTable, expectedResult2MultiTable}) } @@ -484,7 +478,7 @@ func TestTransformMetricsSameDimensionsDifferentTimestampsAreWrittenSeparate(t * measureValues: map[string]string{"value_supported3": "30"}, }) - comparisonTest(t, ts.MappingModeSingleTable, + comparisonTest(t, MappingModeSingleTable, []telegraf.Metric{input1, input2}, []*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable}) @@ -501,7 +495,7 @@ func TestTransformMetricsSameDimensionsDifferentTimestampsAreWrittenSeparate(t * measureValues: map[string]string{"value_supported3": "30"}, }) - comparisonTest(t, ts.MappingModeMultiTable, + comparisonTest(t, MappingModeMultiTable, []telegraf.Metric{input1, input2}, []*timestreamwrite.WriteRecordsInput{expectedResult1MultiTable, expectedResult2MultiTable}) } @@ -531,7 +525,7 @@ func TestTransformMetricsSameDimensionsSameTimestampsAreWrittenTogether(t *testi measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20", "value_supported3": "30"}, }) - comparisonTest(t, ts.MappingModeSingleTable, + comparisonTest(t, MappingModeSingleTable, []telegraf.Metric{input1, input2}, []*timestreamwrite.WriteRecordsInput{expectedResultSingleTable}) @@ -542,7 +536,7 @@ func 
TestTransformMetricsSameDimensionsSameTimestampsAreWrittenTogether(t *testi measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20", "value_supported3": "30"}, }) - comparisonTest(t, ts.MappingModeMultiTable, + comparisonTest(t, MappingModeMultiTable, []telegraf.Metric{input1, input2}, []*timestreamwrite.WriteRecordsInput{expectedResultMultiTable}) } @@ -578,7 +572,7 @@ func TestTransformMetricsDifferentMetricsAreWrittenToDifferentTablesInMultiTable measureValues: map[string]string{"value_supported3": "30"}, }) - comparisonTest(t, ts.MappingModeSingleTable, + comparisonTest(t, MappingModeSingleTable, []telegraf.Metric{input1, input2}, []*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable}) @@ -595,7 +589,7 @@ func TestTransformMetricsDifferentMetricsAreWrittenToDifferentTablesInMultiTable measureValues: map[string]string{"value_supported3": "30"}, }) - comparisonTest(t, ts.MappingModeMultiTable, + comparisonTest(t, MappingModeMultiTable, []telegraf.Metric{input1, input2}, []*timestreamwrite.WriteRecordsInput{expectedResult1MultiTable, expectedResult2MultiTable}) } @@ -616,7 +610,7 @@ func TestTransformMetricsUnsupportedFieldsAreSkipped(t *testing.T) { measureValues: map[string]string{"value_supported1": "10"}, }) - comparisonTest(t, ts.MappingModeSingleTable, + comparisonTest(t, MappingModeSingleTable, []telegraf.Metric{metricWithUnsupportedField}, []*timestreamwrite.WriteRecordsInput{expectedResultSingleTable}) @@ -627,7 +621,7 @@ func TestTransformMetricsUnsupportedFieldsAreSkipped(t *testing.T) { measureValues: map[string]string{"value_supported1": "10"}, }) - comparisonTest(t, ts.MappingModeMultiTable, + comparisonTest(t, MappingModeMultiTable, []telegraf.Metric{metricWithUnsupportedField}, []*timestreamwrite.WriteRecordsInput{expectedResultMultiTable}) } @@ -637,10 +631,10 @@ func comparisonTest(t *testing.T, telegrafMetrics []telegraf.Metric, timestreamRecords 
[]*timestreamwrite.WriteRecordsInput, ) { - var plugin ts.Timestream + var plugin Timestream switch mappingMode { - case ts.MappingModeSingleTable: - plugin = ts.Timestream{ + case MappingModeSingleTable: + plugin = Timestream{ MappingMode: mappingMode, DatabaseName: tsDbName, @@ -648,8 +642,8 @@ func comparisonTest(t *testing.T, SingleTableDimensionNameForTelegrafMeasurementName: testSingleTableDim, Log: testutil.Logger{}, } - case ts.MappingModeMultiTable: - plugin = ts.Timestream{ + case MappingModeMultiTable: + plugin = Timestream{ MappingMode: mappingMode, DatabaseName: tsDbName, Log: testutil.Logger{}, @@ -710,20 +704,20 @@ type SimpleInput struct { } func buildExpectedRecords(i SimpleInput) *timestreamwrite.WriteRecordsInput { - var tsDimensions []*timestreamwrite.Dimension + var tsDimensions []types.Dimension for k, v := range i.dimensions { - tsDimensions = append(tsDimensions, ×treamwrite.Dimension{ + tsDimensions = append(tsDimensions, types.Dimension{ Name: aws.String(k), Value: aws.String(v), }) } - var tsRecords []*timestreamwrite.Record + var tsRecords []types.Record for k, v := range i.measureValues { - tsRecords = append(tsRecords, ×treamwrite.Record{ + tsRecords = append(tsRecords, types.Record{ MeasureName: aws.String(k), MeasureValue: aws.String(v), - MeasureValueType: aws.String("DOUBLE"), + MeasureValueType: types.MeasureValueTypeDouble, }) } @@ -731,10 +725,10 @@ func buildExpectedRecords(i SimpleInput) *timestreamwrite.WriteRecordsInput { DatabaseName: aws.String(tsDbName), TableName: aws.String(i.tableName), Records: tsRecords, - CommonAttributes: ×treamwrite.Record{ + CommonAttributes: &types.Record{ Dimensions: tsDimensions, Time: aws.String(i.t), - TimeUnit: aws.String(timeUnit), + TimeUnit: types.TimeUnitSeconds, }, } From 8265969f523cddb601211e4f070fef4a56b7aae0 Mon Sep 17 00:00:00 2001 From: Gerald Quintana Date: Thu, 21 Oct 2021 23:32:39 +0200 Subject: [PATCH 162/176] feat: Kafka Add metadata full to config (#9833) --- 
etc/telegraf.conf | 6 ++++++ plugins/common/kafka/config.go | 8 ++++++++ plugins/inputs/kafka_consumer/README.md | 3 +++ plugins/inputs/kafka_consumer/kafka_consumer.go | 3 +++ plugins/outputs/kafka/README.md | 3 +++ plugins/outputs/kafka/kafka.go | 3 +++ 6 files changed, 26 insertions(+) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 2b09df6623d58..ae5680b32d52f 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -1081,6 +1081,9 @@ # ## SASL protocol version. When connecting to Azure EventHub set to 0. # # sasl_version = 1 # +# # Disable Kafka metadata full fetch +# # metadata_full = false +# # ## Data format to output. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -7618,6 +7621,9 @@ # ## SASL protocol version. When connecting to Azure EventHub set to 0. # # sasl_version = 1 # +# # Disable Kafka metadata full fetch +# # metadata_full = false +# # ## Name of the consumer group. # # consumer_group = "telegraf_metrics_consumers" # diff --git a/plugins/common/kafka/config.go b/plugins/common/kafka/config.go index 56e70a26b4a95..432d575529fd6 100644 --- a/plugins/common/kafka/config.go +++ b/plugins/common/kafka/config.go @@ -55,6 +55,9 @@ type Config struct { // EnableTLS deprecated EnableTLS *bool `toml:"enable_tls"` + + // Disable full metadata fetching + MetadataFull *bool `toml:"metadata_full"` } // SetConfig on the sarama.Config object from the Config struct. 
@@ -89,5 +92,10 @@ func (k *Config) SetConfig(config *sarama.Config) error { config.Net.TLS.Enable = true } + if k.MetadataFull != nil { + // Defaults to true in Sarama + config.Metadata.Full = *k.MetadataFull + } + return k.SetSASLConfig(config) } diff --git a/plugins/inputs/kafka_consumer/README.md b/plugins/inputs/kafka_consumer/README.md index ac04925a23d14..741f24d04e75e 100644 --- a/plugins/inputs/kafka_consumer/README.md +++ b/plugins/inputs/kafka_consumer/README.md @@ -59,6 +59,9 @@ and use the old zookeeper connection method. ## SASL protocol version. When connecting to Azure EventHub set to 0. # sasl_version = 1 + # Disable Kafka metadata full fetch + # metadata_full = false + ## Name of the consumer group. # consumer_group = "telegraf_metrics_consumers" diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index c6894fd74ae21..70affdc2372b4 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -66,6 +66,9 @@ const sampleConfig = ` ## SASL protocol version. When connecting to Azure EventHub set to 0. # sasl_version = 1 + # Disable Kafka metadata full fetch + # metadata_full = false + ## Name of the consumer group. # consumer_group = "telegraf_metrics_consumers" diff --git a/plugins/outputs/kafka/README.md b/plugins/outputs/kafka/README.md index e76522018fb4a..54108d8be4398 100644 --- a/plugins/outputs/kafka/README.md +++ b/plugins/outputs/kafka/README.md @@ -136,6 +136,9 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm ## SASL protocol version. When connecting to Azure EventHub set to 0. # sasl_version = 1 + # Disable Kafka metadata full fetch + # metadata_full = false + ## Data format to output. 
## Each data format has its own unique set of configuration options, read ## more about them here: diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index d30c730cfac18..2972427001ef5 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -212,6 +212,9 @@ var sampleConfig = ` ## SASL protocol version. When connecting to Azure EventHub set to 0. # sasl_version = 1 + # Disable Kafka metadata full fetch + # metadata_full = false + ## Data format to output. ## Each data format has its own unique set of configuration options, read ## more about them here: From 69c1b85c264d09380961945a52f5c87df67d0f70 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Fri, 22 Oct 2021 12:21:02 -0500 Subject: [PATCH 163/176] fix: set NIGHTLY=1 for correctly named nightly artifacts (#9987) --- .circleci/config.yml | 259 +++++++++++++++++++++++++++++-------------- 1 file changed, 175 insertions(+), 84 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 27f7e75e7a225..76cbd4b280f85 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -123,6 +123,9 @@ commands: type: type: string default: "" + nightly: + type: boolean + default: false steps: - checkout - check-changed-files-or-halt @@ -132,9 +135,14 @@ commands: condition: << parameters.release >> steps: - run: 'make package' + - when: + condition: << parameters.nightly >> + steps: + - run: 'NIGHTLY=1 make package include_packages="$(make << parameters.type >>)"' - unless: condition: or: + - << parameters.nightly >> - << parameters.release >> steps: - run: 'make package include_packages="$(make << parameters.type >>)"' @@ -193,65 +201,125 @@ jobs: parallelism: 4 windows-package: + parameters: + nightly: + type: boolean + default: false executor: go-1_17 steps: - package-build: type: windows + nightly: << parameters.nightly >> darwin-package: + parameters: + nightly: + type: boolean + default: false executor: 
go-1_17 steps: - package-build: type: darwin + nightly: << parameters.nightly >> i386-package: + parameters: + nightly: + type: boolean + default: false executor: go-1_17 steps: - package-build: type: i386 + nightly: << parameters.nightly >> ppc64le-package: + parameters: + nightly: + type: boolean + default: false executor: go-1_17 steps: - package-build: type: ppc64le + nightly: << parameters.nightly >> s390x-package: + parameters: + nightly: + type: boolean + default: false executor: go-1_17 steps: - package-build: type: s390x + nightly: << parameters.nightly >> armel-package: + parameters: + nightly: + type: boolean + default: false executor: go-1_17 steps: - package-build: type: armel + nightly: << parameters.nightly >> amd64-package: + parameters: + nightly: + type: boolean + default: false executor: go-1_17 steps: - package-build: type: amd64 + nightly: << parameters.nightly >> arm64-package: + parameters: + nightly: + type: boolean + default: false executor: go-1_17 steps: - package-build: type: arm64 + nightly: << parameters.nightly >> mipsel-package: + parameters: + nightly: + type: boolean + default: false executor: go-1_17 steps: - package-build: type: mipsel + nightly: << parameters.nightly >> mips-package: + parameters: + nightly: + type: boolean + default: false executor: go-1_17 steps: - package-build: type: mips + nightly: << parameters.nightly >> static-package: + parameters: + nightly: + type: boolean + default: false executor: go-1_17 steps: - package-build: type: static + nightly: << parameters.nightly >> armhf-package: + parameters: + nightly: + type: boolean + default: false executor: go-1_17 steps: - package-build: type: armhf + nightly: << parameters.nightly >> release: executor: go-1_17 @@ -361,54 +429,6 @@ commonjobs: requires: - 'test-go-1_17' - 'test-go-1_17-386' - - &windows-package - 'windows-package': - requires: - - 'test-go-windows' - - &darwin-package - 'darwin-package': - requires: - - 'test-go-mac' - - &i386-package - 
'i386-package': - requires: - - 'test-awaiter' - - &ppc64le-package - 'ppc64le-package': - requires: - - 'test-awaiter' - - &s390x-package - 's390x-package': - requires: - - 'test-awaiter' - - &armel-package - 'armel-package': - requires: - - 'test-awaiter' - - &amd64-package - 'amd64-package': - requires: - - 'test-awaiter' - - &arm64-package - 'arm64-package': - requires: - - 'test-awaiter' - - &armhf-package - 'armhf-package': - requires: - - 'test-awaiter' - - &static-package - 'static-package': - requires: - - 'test-awaiter' - - &mipsel-package - 'mipsel-package': - requires: - - 'test-awaiter' - - &mips-package - 'mips-package': - requires: - - 'test-awaiter' workflows: version: 2 @@ -439,18 +459,42 @@ workflows: tags: only: /.*/ - *test-awaiter - - *windows-package - - *darwin-package - - *i386-package - - *ppc64le-package - - *s390x-package - - *armel-package - - *amd64-package - - *arm64-package - - *armhf-package - - *static-package - - *mipsel-package - - *mips-package + - 'windows-package': + requires: + - 'test-go-windows' + - 'darwin-package': + requires: + - 'test-go-mac' + - 'i386-package': + requires: + - 'test-awaiter' + - 'ppc64le-package': + requires: + - 'test-awaiter' + - 's390x-package': + requires: + - 'test-awaiter' + - 'armel-package': + requires: + - 'test-awaiter' + - 'amd64-package': + requires: + - 'test-awaiter' + - 'arm64-package': + requires: + - 'test-awaiter' + - 'armhf-package': + requires: + - 'test-awaiter' + - 'static-package': + requires: + - 'test-awaiter' + - 'mipsel-package': + requires: + - 'test-awaiter' + - 'mips-package': + requires: + - 'test-awaiter' - 'generate-config': requires: - 'amd64-package' @@ -527,32 +571,79 @@ workflows: - 'test-go-mac' - 'test-go-windows' - *test-awaiter - - *windows-package - - *darwin-package - - *i386-package - - *ppc64le-package - - *s390x-package - - *armel-package - - *amd64-package - - *arm64-package - - *armhf-package - - *static-package - - *mipsel-package - - *mips-package + - 
'windows-package': + name: 'windows-package-nightly' + nightly: true + requires: + - 'test-go-windows' + - 'darwin-package': + name: 'darwin-package-nightly' + nightly: true + requires: + - 'test-go-mac' + - 'i386-package': + name: 'i386-package-nightly' + nightly: true + requires: + - 'test-awaiter' + - 'ppc64le-package': + name: 'ppc64le-package-nightly' + nightly: true + requires: + - 'test-awaiter' + - 's390x-package': + name: 's390x-package-nightly' + requires: + - 'test-awaiter' + - 'armel-package': + name: 'armel-package-nightly' + nightly: true + requires: + - 'test-awaiter' + - 'amd64-package': + name: 'amd64-package-nightly' + nightly: true + requires: + - 'test-awaiter' + - 'arm64-package': + name: 'arm64-package-nightly' + nightly: true + requires: + - 'test-awaiter' + - 'armhf-package': + name: 'armhf-package-nightly' + nightly: true + requires: + - 'test-awaiter' + - 'static-package': + name: 'static-package-nightly' + nightly: true + requires: + - 'test-awaiter' + - 'mipsel-package': + name: 'mipsel-package-nightly' + nightly: true + requires: + - 'test-awaiter' + - 'mips-package': + name: 'mips-package-nightly' + nightly: true + requires: + - 'test-awaiter' - nightly: requires: - - 'i386-package' - - 'ppc64le-package' - - 's390x-package' - - 'armel-package' - - 'amd64-package' - - 'mipsel-package' - - 'mips-package' - - 'darwin-package' - - 'windows-package' - - 'static-package' - - 'arm64-package' - - 'armhf-package' + - 'i386-package-nightly' + - 'ppc64le-package-nightly' + - 's390x-package-nightly' + - 'armel-package-nightly' + - 'amd64-package-nightly' + - 'mipsel-package-nightly' + - 'mips-package-nightly' + - 'darwin-package-nightly' + - 'windows-package-nightly' + - 'static-package-nightly' + - 'arm64-package-nightly' + - 'armhf-package-nightly' triggers: - schedule: cron: "0 7 * * *" From a71d49a63ac614ec3f1c1afbc5778ba6de196054 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Fri, 22 Oct 2021 
13:34:01 -0500 Subject: [PATCH 164/176] docs: Create SECURITY.md (#9951) Co-authored-by: Joshua Powers --- SECURITY.md | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 SECURITY.md diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000000000..1d74711aa9079 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,6 @@ +# Security Policy + +## Reporting a Vulnerability + +InfluxData takes security and our users' trust very seriously. If you believe you have found a security issue in any of our open source projects, +please responsibly disclose it by contacting security@influxdata.com. More details about security vulnerability reporting, including our GPG key, can be found [here](https://www.influxdata.com/how-to-report-security-vulnerabilities/). From 1391f05e8047225713c34fefaa8f4ae45161874a Mon Sep 17 00:00:00 2001 From: Thomas Casteleyn Date: Fri, 22 Oct 2021 23:12:12 +0200 Subject: [PATCH 165/176] feat: Add more details to processors.ifname logmessages (#9984) --- plugins/processors/ifname/ifname.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/processors/ifname/ifname.go b/plugins/processors/ifname/ifname.go index 10623c041dd2d..eb3fb2333e278 100644 --- a/plugins/processors/ifname/ifname.go +++ b/plugins/processors/ifname/ifname.go @@ -157,7 +157,7 @@ func (d *IfName) addTag(metric telegraf.Metric) error { for { m, age, err := d.getMap(agent) if err != nil { - return fmt.Errorf("couldn't retrieve the table of interface names: %w", err) + return fmt.Errorf("couldn't retrieve the table of interface names for %s: %w", agent, err) } name, found := m[num] @@ -171,7 +171,7 @@ func (d *IfName) addTag(metric telegraf.Metric) error { // the interface we're interested in. If the entry is old // enough, retrieve it from the agent once more. 
if age < minRetry { - return fmt.Errorf("interface number %d isn't in the table of interface names", num) + return fmt.Errorf("interface number %d isn't in the table of interface names on %s", num, agent) } if firstTime { @@ -181,7 +181,7 @@ func (d *IfName) addTag(metric telegraf.Metric) error { } // not found, cache hit, retrying - return fmt.Errorf("missing interface but couldn't retrieve table") + return fmt.Errorf("missing interface but couldn't retrieve table for %v", agent) } } @@ -212,7 +212,7 @@ func (d *IfName) Start(acc telegraf.Accumulator) error { fn := func(m telegraf.Metric) []telegraf.Metric { err := d.addTag(m) if err != nil { - d.Log.Debugf("Error adding tag %v", err) + d.Log.Debugf("Error adding tag: %v", err) } return []telegraf.Metric{m} } From b3a078e25cfd2cdadd1b1cc6245dc2e5f388b6b8 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Fri, 22 Oct 2021 15:14:13 -0600 Subject: [PATCH 166/176] fix: add s390x to nightlies (#9990) --- .circleci/config.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index 76cbd4b280f85..028198bbdb236 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -593,6 +593,7 @@ workflows: - 'test-awaiter' - 's390x-package': name: 's390x-package-nightly' + nightly: true requires: - 'test-awaiter' - 'armel-package': From 76d5e3e4c8c24e86acadd8f5312b84f4f0f7d887 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Fri, 22 Oct 2021 15:14:26 -0600 Subject: [PATCH 167/176] docs: update nightlies links (#9989) --- docs/NIGHTLIES.md | 57 +++++++++++++++-------------------------------- 1 file changed, 18 insertions(+), 39 deletions(-) diff --git a/docs/NIGHTLIES.md b/docs/NIGHTLIES.md index 63cdc2d82cfdc..a11b2bdfefecc 100644 --- a/docs/NIGHTLIES.md +++ b/docs/NIGHTLIES.md @@ -1,42 +1,21 @@ -### Nightly Builds -These builds are generated from the master branch: +# Nightly Builds -FreeBSD - .tar.gz -- 
[telegraf-nightly_freebsd_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_amd64.tar.gz) -- [telegraf-nightly_freebsd_armv7.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_armv7.tar.gz) -- [telegraf-nightly_freebsd_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_i386.tar.gz) +These builds are generated from the master branch each night: -Linux - .rpm -- [telegraf-nightly.arm64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.arm64.rpm) -- [telegraf-nightly.armel.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armel.rpm) -- [telegraf-nightly.armv6hl.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armv6hl.rpm) -- [telegraf-nightly.i386.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.i386.rpm) -- [telegraf-nightly.ppc64le.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.ppc64le.rpm) -- [telegraf-nightly.s390x.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.s390x.rpm) -- [telegraf-nightly.x86_64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.x86_64.rpm) - -Linux - .deb -- [telegraf_nightly_amd64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_amd64.deb) -- [telegraf_nightly_arm64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_arm64.deb) -- [telegraf_nightly_armel.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armel.deb) -- [telegraf_nightly_armhf.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armhf.deb) -- [telegraf_nightly_i386.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_i386.deb) -- [telegraf_nightly_ppc64el.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_ppc64el.deb) -- [telegraf_nightly_s390x.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_s390x.deb) - -Linux - .tar.gz -- 
[telegraf-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_amd64.tar.gz) -- [telegraf-nightly_linux_arm64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_arm64.tar.gz) -- [telegraf-nightly_linux_armel.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armel.tar.gz) -- [telegraf-nightly_linux_armhf.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armhf.tar.gz) -- [telegraf-nightly_linux_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_i386.tar.gz) -- [telegraf-nightly_linux_s390x.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_s390x.tar.gz) -- [telegraf-static-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-static-nightly_linux_amd64.tar.gz) - -OSX - .tar.gz -- [telegraf-nightly_darwin_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_darwin_amd64.tar.gz) - -Windows - .zip -- [telegraf-nightly_windows_i386.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_i386.zip) -- [telegraf-nightly_windows_amd64.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_amd64.zip) +| DEB | RPM | TAR GZ | ZIP | +| --------------- | --------------- | ------------------------------| --- | +| [amd64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_amd64.deb) | [aarch64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.aarch64.rpm) | [darwin_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_darwin_amd64.tar.gz) | [windows_amd64.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_amd64.zip) | +| [arm64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_arm64.deb) | [armel.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armel.rpm) | 
[freebsd_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_amd64.tar.gz) | [windows_i386.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_i386.zip) | +| [armel.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armel.deb) | [armv6hl.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armv6hl.rpm) | [freebsd_armv7.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_armv7.tar.gz) | | +| [armhf.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armhf.deb) | [i386.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.i386.rpm) | [freebsd_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_i386.tar.gz) | | +| [i386.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_i386.deb) | [ppc64le.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.ppc64le.rpm) | [linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_amd64.tar.gz) | | +| [mips.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_mips.deb) | [s390x.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.s390x.rpm) | [linux_arm64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_arm64.tar.gz) | | +| [mipsel.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_mipsel.deb) | [x86_64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.x86_64.rpm) | [linux_armel.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armel.tar.gz) | | +| [ppc64el.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_ppc64el.deb) | | [linux_armhf.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armhf.tar.gz) | | +| [s390x.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_s390x.deb) | | 
[linux_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_i386.tar.gz) | | +| | | [linux_mips.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_mips.tar.gz) | | +| | | [linux_mipsel.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_mipsel.tar.gz) | | +| | | [linux_ppc64le.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_ppc64le.tar.gz) | | +| | | [linux_s390x.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_s390x.tar.gz) | | +| | | [static_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_static_linux_amd64.tar.gz) | | From 9d5eb7dd682885d9a5da35f4c9678da5254e171b Mon Sep 17 00:00:00 2001 From: AsafMah Date: Mon, 25 Oct 2021 17:44:20 +0300 Subject: [PATCH 168/176] feat: add option to skip table creation in azure data explorer output (#9942) --- plugins/outputs/azure_data_explorer/README.md | 9 ++- .../azure_data_explorer.go | 13 ++++- .../azure_data_explorer_test.go | 58 +++++++++++++++---- 3 files changed, 65 insertions(+), 15 deletions(-) diff --git a/plugins/outputs/azure_data_explorer/README.md b/plugins/outputs/azure_data_explorer/README.md index 4ae5bf7139924..db2aba469d292 100644 --- a/plugins/outputs/azure_data_explorer/README.md +++ b/plugins/outputs/azure_data_explorer/README.md @@ -31,6 +31,10 @@ Azure Data Explorer is a distributed, columnar store, purpose built for any type ## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable"). # table_name = "" + + ## Creates tables and relevant mapping if set to true(default). + ## Skips table and mapping creation if set to false, this is useful for running Telegraf with the lowest possible permissions i.e. table ingestor role. 
+ # create_tables = true ``` ## Metrics Grouping @@ -85,7 +89,10 @@ These methods are: [principal]: https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-application-objects -Whichever method, the designated Principal needs to be assigned the `Database User` role on the Database level in the Azure Data Explorer. This role will allow the plugin to create the required tables and ingest data into it. +Whichever method, the designated Principal needs to be assigned the `Database User` role on the Database level in the Azure Data Explorer. This role will +allow the plugin to create the required tables and ingest data into it. +If `create_tables=false` then the designated principal only needs the `Database Ingestor` role at least. + ### Configurations of the chosen Authentication Method diff --git a/plugins/outputs/azure_data_explorer/azure_data_explorer.go b/plugins/outputs/azure_data_explorer/azure_data_explorer.go index b4c2054d3c22e..1f958d525004d 100644 --- a/plugins/outputs/azure_data_explorer/azure_data_explorer.go +++ b/plugins/outputs/azure_data_explorer/azure_data_explorer.go @@ -27,6 +27,7 @@ type AzureDataExplorer struct { Timeout config.Duration `toml:"timeout"` MetricsGrouping string `toml:"metrics_grouping_type"` TableName string `toml:"table_name"` + CreateTables bool `toml:"create_tables"` client localClient ingesters map[string]localIngestor serializer serializers.Serializer @@ -57,7 +58,7 @@ func (adx *AzureDataExplorer) Description() string { func (adx *AzureDataExplorer) SampleConfig() string { return ` - ## Azure Data Exlorer cluster endpoint + ## Azure Data Explorer cluster endpoint ## ex: endpoint_url = "https://clustername.australiasoutheast.kusto.windows.net" endpoint_url = "" @@ -77,6 +78,9 @@ func (adx *AzureDataExplorer) SampleConfig() string { ## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable"). 
# table_name = "" + ## Creates tables and relevant mapping if set to true(default). + ## Skips table and mapping creation if set to false, this is useful for running Telegraf with the lowest possible permissions i.e. table ingestor role. + # create_tables = true ` } @@ -198,6 +202,10 @@ func (adx *AzureDataExplorer) getIngestor(ctx context.Context, tableName string) } func (adx *AzureDataExplorer) createAzureDataExplorerTable(ctx context.Context, tableName string) error { + if !adx.CreateTables { + adx.Log.Info("skipped table creation") + return nil + } createStmt := kusto.NewStmt("", kusto.UnsafeStmt(unsafe.Stmt{Add: true, SuppressWarning: true})).UnsafeAdd(fmt.Sprintf(createTableCommand, tableName)) if _, err := adx.client.Mgmt(ctx, adx.Database, createStmt); err != nil { return err @@ -241,7 +249,8 @@ func (adx *AzureDataExplorer) Init() error { func init() { outputs.Add("azure_data_explorer", func() telegraf.Output { return &AzureDataExplorer{ - Timeout: config.Duration(20 * time.Second), + Timeout: config.Duration(20 * time.Second), + CreateTables: true, } }) } diff --git a/plugins/outputs/azure_data_explorer/azure_data_explorer_test.go b/plugins/outputs/azure_data_explorer/azure_data_explorer_test.go index b8d30d66ce28b..ce53acf43faf4 100644 --- a/plugins/outputs/azure_data_explorer/azure_data_explorer_test.go +++ b/plugins/outputs/azure_data_explorer/azure_data_explorer_test.go @@ -31,10 +31,12 @@ func TestWrite(t *testing.T) { tableName string expected map[string]interface{} expectedWriteError string + createTables bool }{ { - name: "Valid metric", - inputMetric: testutil.MockMetrics(), + name: "Valid metric", + inputMetric: testutil.MockMetrics(), + createTables: true, client: &fakeClient{ queries: make([]string, 0), internalMgmt: func(f *fakeClient, ctx context.Context, db string, query kusto.Stmt, options ...kusto.MgmtOption) (*kusto.RowIterator, error) { @@ -56,8 +58,34 @@ func TestWrite(t *testing.T) { }, }, { - name: "Error in Mgmt", - inputMetric: 
testutil.MockMetrics(), + name: "Don't create tables'", + inputMetric: testutil.MockMetrics(), + createTables: false, + client: &fakeClient{ + queries: make([]string, 0), + internalMgmt: func(f *fakeClient, ctx context.Context, db string, query kusto.Stmt, options ...kusto.MgmtOption) (*kusto.RowIterator, error) { + require.Fail(t, "Mgmt shouldn't be called when create_tables is false") + f.queries = append(f.queries, query.String()) + return &kusto.RowIterator{}, nil + }, + }, + createIngestor: createFakeIngestor, + metricsGrouping: tablePerMetric, + expected: map[string]interface{}{ + "metricName": "test1", + "fields": map[string]interface{}{ + "value": 1.0, + }, + "tags": map[string]interface{}{ + "tag1": "value1", + }, + "timestamp": float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).UnixNano() / int64(time.Second)), + }, + }, + { + name: "Error in Mgmt", + inputMetric: testutil.MockMetrics(), + createTables: true, client: &fakeClient{ queries: make([]string, 0), internalMgmt: func(f *fakeClient, ctx context.Context, db string, query kusto.Stmt, options ...kusto.MgmtOption) (*kusto.RowIterator, error) { @@ -79,8 +107,9 @@ func TestWrite(t *testing.T) { expectedWriteError: "creating table for \"test1\" failed: Something went wrong", }, { - name: "SingleTable metric grouping type", - inputMetric: testutil.MockMetrics(), + name: "SingleTable metric grouping type", + inputMetric: testutil.MockMetrics(), + createTables: true, client: &fakeClient{ queries: make([]string, 0), internalMgmt: func(f *fakeClient, ctx context.Context, db string, query kusto.Stmt, options ...kusto.MgmtOption) (*kusto.RowIterator, error) { @@ -114,6 +143,7 @@ func TestWrite(t *testing.T) { Log: testutil.Logger{}, MetricsGrouping: tC.metricsGrouping, TableName: tC.tableName, + CreateTables: tC.createTables, client: tC.client, ingesters: map[string]localIngestor{}, createIngestor: tC.createIngestor, @@ -149,11 +179,15 @@ func TestWrite(t *testing.T) { expectedTime := 
tC.expected["timestamp"].(float64) require.Equal(t, expectedTime, createdFakeIngestor.actualOutputMetric["timestamp"]) - createTableString := fmt.Sprintf(createTableCommandExpected, expectedNameOfTable) - require.Equal(t, createTableString, tC.client.queries[0]) + if tC.createTables { + createTableString := fmt.Sprintf(createTableCommandExpected, expectedNameOfTable) + require.Equal(t, createTableString, tC.client.queries[0]) - createTableMappingString := fmt.Sprintf(createTableMappingCommandExpected, expectedNameOfTable, expectedNameOfTable) - require.Equal(t, createTableMappingString, tC.client.queries[1]) + createTableMappingString := fmt.Sprintf(createTableMappingCommandExpected, expectedNameOfTable, expectedNameOfTable) + require.Equal(t, createTableMappingString, tC.client.queries[1]) + } else { + require.Empty(t, tC.client.queries) + } } }) } @@ -185,10 +219,10 @@ type fakeIngestor struct { actualOutputMetric map[string]interface{} } -func createFakeIngestor(client localClient, database string, tableName string) (localIngestor, error) { +func createFakeIngestor(localClient, string, string) (localIngestor, error) { return &fakeIngestor{}, nil } -func (f *fakeIngestor) FromReader(ctx context.Context, reader io.Reader, options ...ingest.FileOption) (*ingest.Result, error) { +func (f *fakeIngestor) FromReader(_ context.Context, reader io.Reader, _ ...ingest.FileOption) (*ingest.Result, error) { scanner := bufio.NewScanner(reader) scanner.Scan() firstLine := scanner.Text() From 76251d34f37678f6e89868106e00c79d5ec1d3be Mon Sep 17 00:00:00 2001 From: Josef Johansson Date: Mon, 25 Oct 2021 16:47:26 +0200 Subject: [PATCH 169/176] feat: plugins/common/tls/config.go: Filter client certificates by DNS names (#9910) --- docs/TLS.md | 6 +++ plugins/common/tls/config.go | 40 ++++++++++++++++---- plugins/common/tls/config_test.go | 63 ++++++++++++++++++++++++++----- 3 files changed, 92 insertions(+), 17 deletions(-) diff --git a/docs/TLS.md b/docs/TLS.md index 
355da32bb98be..74b2512f1e59d 100644 --- a/docs/TLS.md +++ b/docs/TLS.md @@ -31,6 +31,12 @@ The server TLS configuration provides support for TLS mutual authentication: ## enable mutually authenticated TLS connections. # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +## Set one or more allowed DNS name to enable a whitelist +## to verify incoming client certificates. +## It will go through all available SAN in the certificate, +## if of them matches the request is accepted. +# tls_allowed_dns_names = ["client.example.org"] + ## Add service certificate and key. # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" diff --git a/plugins/common/tls/config.go b/plugins/common/tls/config.go index 586ec8fd4a417..271d63e7cac2e 100644 --- a/plugins/common/tls/config.go +++ b/plugins/common/tls/config.go @@ -4,6 +4,7 @@ import ( "crypto/tls" "crypto/x509" "fmt" + "github.com/influxdata/telegraf/internal/choice" "os" "strings" ) @@ -24,12 +25,13 @@ type ClientConfig struct { // ServerConfig represents the standard server TLS config. 
type ServerConfig struct { - TLSCert string `toml:"tls_cert"` - TLSKey string `toml:"tls_key"` - TLSAllowedCACerts []string `toml:"tls_allowed_cacerts"` - TLSCipherSuites []string `toml:"tls_cipher_suites"` - TLSMinVersion string `toml:"tls_min_version"` - TLSMaxVersion string `toml:"tls_max_version"` + TLSCert string `toml:"tls_cert"` + TLSKey string `toml:"tls_key"` + TLSAllowedCACerts []string `toml:"tls_allowed_cacerts"` + TLSCipherSuites []string `toml:"tls_cipher_suites"` + TLSMinVersion string `toml:"tls_min_version"` + TLSMaxVersion string `toml:"tls_max_version"` + TLSAllowedDNSNames []string `toml:"tls_allowed_dns_names"` } // TLSConfig returns a tls.Config, may be nil without error if TLS is not @@ -141,6 +143,12 @@ func (c *ServerConfig) TLSConfig() (*tls.Config, error) { "tls min version %q can't be greater than tls max version %q", tlsConfig.MinVersion, tlsConfig.MaxVersion) } + // Since clientAuth is tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + // there must be certs to validate. + if len(c.TLSAllowedCACerts) > 0 && len(c.TLSAllowedDNSNames) > 0 { + tlsConfig.VerifyPeerCertificate = c.verifyPeerCertificate + } + return tlsConfig, nil } @@ -152,8 +160,7 @@ func makeCertPool(certFiles []string) (*x509.CertPool, error) { return nil, fmt.Errorf( "could not read certificate %q: %v", certFile, err) } - ok := pool.AppendCertsFromPEM(pem) - if !ok { + if !pool.AppendCertsFromPEM(pem) { return nil, fmt.Errorf( "could not parse any PEM certificates %q: %v", certFile, err) } @@ -172,3 +179,20 @@ func loadCertificate(config *tls.Config, certFile, keyFile string) error { config.BuildNameToCertificate() return nil } + +func (c *ServerConfig) verifyPeerCertificate(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { + // The certificate chain is client + intermediate + root. + // Let's review the client certificate. 
+ cert, err := x509.ParseCertificate(rawCerts[0]) + if err != nil { + return fmt.Errorf("could not validate peer certificate: %v", err) + } + + for _, name := range cert.DNSNames { + if choice.Contains(name, c.TLSAllowedDNSNames) { + return nil + } + } + + return fmt.Errorf("peer certificate not in allowed DNS Name list: %v", cert.DNSNames) +} diff --git a/plugins/common/tls/config_test.go b/plugins/common/tls/config_test.go index 2784ace6920e3..b118c48b5f912 100644 --- a/plugins/common/tls/config_test.go +++ b/plugins/common/tls/config_test.go @@ -128,12 +128,13 @@ func TestServerConfig(t *testing.T) { { name: "success", server: tls.ServerConfig{ - TLSCert: pki.ServerCertPath(), - TLSKey: pki.ServerKeyPath(), - TLSAllowedCACerts: []string{pki.CACertPath()}, - TLSCipherSuites: []string{pki.CipherSuite()}, - TLSMinVersion: pki.TLSMinVersion(), - TLSMaxVersion: pki.TLSMaxVersion(), + TLSCert: pki.ServerCertPath(), + TLSKey: pki.ServerKeyPath(), + TLSAllowedCACerts: []string{pki.CACertPath()}, + TLSCipherSuites: []string{pki.CipherSuite()}, + TLSAllowedDNSNames: []string{"localhost", "127.0.0.1"}, + TLSMinVersion: pki.TLSMinVersion(), + TLSMaxVersion: pki.TLSMaxVersion(), }, }, { @@ -293,9 +294,10 @@ func TestConnect(t *testing.T) { } serverConfig := tls.ServerConfig{ - TLSCert: pki.ServerCertPath(), - TLSKey: pki.ServerKeyPath(), - TLSAllowedCACerts: []string{pki.CACertPath()}, + TLSCert: pki.ServerCertPath(), + TLSKey: pki.ServerKeyPath(), + TLSAllowedCACerts: []string{pki.CACertPath()}, + TLSAllowedDNSNames: []string{"localhost", "127.0.0.1"}, } serverTLSConfig, err := serverConfig.TLSConfig() @@ -323,3 +325,46 @@ func TestConnect(t *testing.T) { require.NoError(t, err) require.Equal(t, 200, resp.StatusCode) } + +func TestConnectWrongDNS(t *testing.T) { + clientConfig := tls.ClientConfig{ + TLSCA: pki.CACertPath(), + TLSCert: pki.ClientCertPath(), + TLSKey: pki.ClientKeyPath(), + } + + serverConfig := tls.ServerConfig{ + TLSCert: pki.ServerCertPath(), + TLSKey: 
pki.ServerKeyPath(), + TLSAllowedCACerts: []string{pki.CACertPath()}, + TLSAllowedDNSNames: []string{"localhos", "127.0.0.2"}, + } + + serverTLSConfig, err := serverConfig.TLSConfig() + require.NoError(t, err) + + ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + ts.TLS = serverTLSConfig + + ts.StartTLS() + defer ts.Close() + + clientTLSConfig, err := clientConfig.TLSConfig() + require.NoError(t, err) + + client := http.Client{ + Transport: &http.Transport{ + TLSClientConfig: clientTLSConfig, + }, + Timeout: 10 * time.Second, + } + + resp, err := client.Get(ts.URL) + require.Error(t, err) + if resp != nil { + err = resp.Body.Close() + require.NoError(t, err) + } +} From 374662be07452d1de1bb5c0e59fe39915253bfe2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20Dupuy?= Date: Mon, 25 Oct 2021 17:35:12 +0200 Subject: [PATCH 170/176] test: add sqlserver plugin integration tests (#9943) * Adding integration tests for AzureSQLDBResourceStats. * Adding azuresqlmanagedqueries_test.go * Updated integration tests for sql managed instance queries. * Updated integration tests for sql database queries. * Updated integration tests for azure sql database queries * Updated integration tests for azure sql managed instance queries * Fixed lint issues with make fmt. 
Co-authored-by: yannick --- .../sqlserver/azuresqldbqueries_test.go | 450 ++++++++++++++++++ .../sqlserver/azuresqlmanagedqueries_test.go | 378 +++++++++++++++ 2 files changed, 828 insertions(+) create mode 100644 plugins/inputs/sqlserver/azuresqldbqueries_test.go create mode 100644 plugins/inputs/sqlserver/azuresqlmanagedqueries_test.go diff --git a/plugins/inputs/sqlserver/azuresqldbqueries_test.go b/plugins/inputs/sqlserver/azuresqldbqueries_test.go new file mode 100644 index 0000000000000..6d5712f39509a --- /dev/null +++ b/plugins/inputs/sqlserver/azuresqldbqueries_test.go @@ -0,0 +1,450 @@ +package sqlserver + +import ( + "os" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestAzureSQL_Database_ResourceStats_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBResourceStats"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_azure_db_resource_stats")) + require.True(t, acc.HasTag("sqlserver_azure_db_resource_stats", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_azure_db_resource_stats", "database_name")) + require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "avg_cpu_percent")) + require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "avg_data_io_percent")) + require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "avg_log_write_percent")) + require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", 
"avg_memory_usage_percent")) + require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "xtp_storage_percent")) + require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "max_worker_percent")) + require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "max_session_percent")) + require.True(t, acc.HasField("sqlserver_azure_db_resource_stats", "dtu_limit")) // Can be null. + require.True(t, acc.HasField("sqlserver_azure_db_resource_stats", "avg_login_rate_percent")) // Can be null. + require.True(t, acc.HasField("sqlserver_azure_db_resource_stats", "end_time")) // Time field. + require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "avg_instance_memory_percent")) + require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "avg_instance_cpu_percent")) + require.True(t, acc.HasTag("sqlserver_azure_db_resource_stats", "replica_updateability")) + + // This query should only return one row + require.Equal(t, 1, len(acc.Metrics)) + server.Stop() +} + +func TestAzureSQL_Database_ResourceGovernance_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBResourceGovernance"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_db_resource_governance")) + require.True(t, acc.HasTag("sqlserver_db_resource_governance", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_db_resource_governance", "database_name")) + require.True(t, acc.HasTag("sqlserver_db_resource_governance", 
"slo_name")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "dtu_limit")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "max_cpu")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "cap_cpu")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "instance_cap_cpu")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "max_db_memory")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "max_db_max_size_in_mb")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "db_file_growth_in_mb")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "log_size_in_mb")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "instance_max_worker_threads")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "primary_group_max_workers")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "instance_max_log_rate")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "primary_min_log_rate")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "primary_max_log_rate")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "primary_group_min_io")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "primary_group_max_io")) + require.True(t, acc.HasFloatField("sqlserver_db_resource_governance", "primary_group_min_cpu")) + require.True(t, acc.HasFloatField("sqlserver_db_resource_governance", "primary_group_max_cpu")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "primary_pool_max_workers")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "pool_max_io")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "checkpoint_rate_mbps")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", 
"checkpoint_rate_io")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "volume_local_iops")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "volume_managed_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "volume_external_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "volume_type_local_iops")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "volume_type_managed_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "volume_type_external_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "volume_pfs_iops")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "volume_type_pfs_iops")) + require.True(t, acc.HasTag("sqlserver_db_resource_governance", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Database_WaitStats_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBWaitStats"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_azuredb_waitstats")) + require.True(t, acc.HasTag("sqlserver_azuredb_waitstats", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_azuredb_waitstats", "database_name")) + require.True(t, acc.HasTag("sqlserver_azuredb_waitstats", "wait_type")) + require.True(t, acc.HasInt64Field("sqlserver_azuredb_waitstats", "wait_time_ms")) + require.True(t, 
acc.HasInt64Field("sqlserver_azuredb_waitstats", "resource_wait_ms")) + require.True(t, acc.HasInt64Field("sqlserver_azuredb_waitstats", "signal_wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_azuredb_waitstats", "max_wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_azuredb_waitstats", "waiting_tasks_count")) + require.True(t, acc.HasTag("sqlserver_azuredb_waitstats", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Database_DatabaseIO_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBDatabaseIO"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_database_io")) + require.True(t, acc.HasTag("sqlserver_database_io", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_database_io", "database_name")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "database_id")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "file_id")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "read_latency_ms")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "reads")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "read_bytes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "write_latency_ms")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "writes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "write_bytes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "rg_read_stall_ms")) + 
require.True(t, acc.HasInt64Field("sqlserver_database_io", "rg_write_stall_ms")) + require.True(t, acc.HasTag("sqlserver_database_io", "logical_filename")) + require.True(t, acc.HasTag("sqlserver_database_io", "physical_filename")) + require.True(t, acc.HasTag("sqlserver_database_io", "file_type")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "current_size_mb")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "space_used_mb")) + require.True(t, acc.HasTag("sqlserver_database_io", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Database_ServerProperties_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBServerProperties"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_server_properties")) + require.True(t, acc.HasTag("sqlserver_server_properties", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_server_properties", "database_name")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "cpu_count")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "server_memory")) + require.True(t, acc.HasTag("sqlserver_server_properties", "sku")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "engine_edition")) + require.True(t, acc.HasTag("sqlserver_server_properties", "hardware_type")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "total_storage_mb")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", 
"available_storage_mb")) + require.True(t, acc.HasField("sqlserver_server_properties", "uptime")) // Time field. + require.True(t, acc.HasTag("sqlserver_server_properties", "replica_updateability")) + + // This query should only return one row + require.Equal(t, 1, len(acc.Metrics)) + server.Stop() +} + +func TestAzureSQL_Database_OsWaitstats_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBOsWaitstats"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_waitstats")) + require.True(t, acc.HasTag("sqlserver_waitstats", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_waitstats", "database_name")) + require.True(t, acc.HasTag("sqlserver_waitstats", "wait_type")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "resource_wait_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "signal_wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "max_wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "waiting_tasks_count")) + require.True(t, acc.HasTag("sqlserver_waitstats", "wait_category")) + require.True(t, acc.HasTag("sqlserver_waitstats", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Database_MemoryClerks_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + 
t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBMemoryClerks"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_memory_clerks")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "database_name")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "clerk_type")) + require.True(t, acc.HasInt64Field("sqlserver_memory_clerks", "size_kb")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Database_PerformanceCounters_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBPerformanceCounters"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_performance")) + require.True(t, acc.HasTag("sqlserver_performance", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_performance", "database_name")) + require.True(t, acc.HasTag("sqlserver_performance", "object")) + require.True(t, acc.HasTag("sqlserver_performance", "counter")) + require.True(t, acc.HasTag("sqlserver_performance", "instance")) + require.True(t, 
acc.HasFloatField("sqlserver_performance", "value")) + require.True(t, acc.HasTag("sqlserver_performance", "counter_type")) + require.True(t, acc.HasTag("sqlserver_performance", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Database_Requests_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBRequests"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_requests")) + require.True(t, acc.HasTag("sqlserver_requests", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_requests", "database_name")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "session_id")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "request_id")) + require.True(t, acc.HasTag("sqlserver_requests", "session_db_name")) + require.True(t, acc.HasTag("sqlserver_requests", "status")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "cpu_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "total_elapsed_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "logical_reads")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "writes")) + require.True(t, acc.HasTag("sqlserver_requests", "command")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "wait_time_ms")) + require.True(t, acc.HasField("sqlserver_requests", "wait_type")) // Can be null. 
+ require.True(t, acc.HasTag("sqlserver_requests", "wait_resource")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "blocking_session_id")) + require.True(t, acc.HasTag("sqlserver_requests", "program_name")) + require.True(t, acc.HasTag("sqlserver_requests", "host_name")) + require.True(t, acc.HasField("sqlserver_requests", "nt_user_name")) // Can be null. + require.True(t, acc.HasTag("sqlserver_requests", "login_name")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "open_transaction")) + require.True(t, acc.HasTag("sqlserver_requests", "transaction_isolation_level")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "granted_query_memory_pages")) + require.True(t, acc.HasFloatField("sqlserver_requests", "percent_complete")) + require.True(t, acc.HasTag("sqlserver_requests", "statement_text")) + require.True(t, acc.HasField("sqlserver_requests", "objectid")) // Can be null. + require.True(t, acc.HasField("sqlserver_requests", "stmt_object_name")) // Can be null. + require.True(t, acc.HasField("sqlserver_requests", "stmt_db_name")) // Can be null. 
+ require.True(t, acc.HasTag("sqlserver_requests", "query_hash")) + require.True(t, acc.HasTag("sqlserver_requests", "query_plan_hash")) + require.True(t, acc.HasTag("sqlserver_requests", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Database_Schedulers_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBSchedulers"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_schedulers")) + require.True(t, acc.HasTag("sqlserver_schedulers", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_schedulers", "scheduler_id")) + require.True(t, acc.HasTag("sqlserver_schedulers", "cpu_id")) + require.True(t, acc.HasField("sqlserver_schedulers", "is_online")) // Bool field. + require.True(t, acc.HasField("sqlserver_schedulers", "is_idle")) // Bool field. 
+ require.True(t, acc.HasInt64Field("sqlserver_schedulers", "preemptive_switches_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "context_switches_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "current_tasks_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "runnable_tasks_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "current_workers_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "active_workers_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "work_queue_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "pending_disk_io_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "load_factor")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "yield_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "total_cpu_usage_ms")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "total_scheduler_delay_ms")) + require.True(t, acc.HasTag("sqlserver_schedulers", "replica_updateability")) + + server.Stop() +} diff --git a/plugins/inputs/sqlserver/azuresqlmanagedqueries_test.go b/plugins/inputs/sqlserver/azuresqlmanagedqueries_test.go new file mode 100644 index 0000000000000..72a74174a8722 --- /dev/null +++ b/plugins/inputs/sqlserver/azuresqlmanagedqueries_test.go @@ -0,0 +1,378 @@ +package sqlserver + +import ( + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" + "os" + "testing" +) + +func TestAzureSQL_Managed_ResourceStats_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMIResourceStats"}, + AuthMethod: 
"connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_azure_db_resource_stats")) + require.True(t, acc.HasTag("sqlserver_azure_db_resource_stats", "sql_instance")) + require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "avg_cpu_percent")) + require.True(t, acc.HasTag("sqlserver_azure_db_resource_stats", "replica_updateability")) + + // This query should only return one row + require.Equal(t, 1, len(acc.Metrics)) + server.Stop() +} + +func TestAzureSQL_Managed_ResourceGovernance_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMIResourceGovernance"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_instance_resource_governance")) + require.True(t, acc.HasTag("sqlserver_instance_resource_governance", "sql_instance")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "instance_cap_cpu")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "instance_max_log_rate")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "instance_max_worker_threads")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "tempdb_log_file_number")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "volume_local_iops")) + 
require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "volume_external_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "volume_managed_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "voltype_local_iops")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "voltype_man_xtore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "voltype_ext_xtore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "vol_ext_xtore_iops")) + require.True(t, acc.HasTag("sqlserver_instance_resource_governance", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Managed_DatabaseIO_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMIDatabaseIO"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_database_io")) + require.True(t, acc.HasTag("sqlserver_database_io", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_database_io", "physical_filename")) + require.True(t, acc.HasTag("sqlserver_database_io", "logical_filename")) + require.True(t, acc.HasTag("sqlserver_database_io", "file_type")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "reads")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "read_bytes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "read_latency_ms")) + 
require.True(t, acc.HasInt64Field("sqlserver_database_io", "write_latency_ms")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "writes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "write_bytes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "rg_read_stall_ms")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "rg_write_stall_ms")) + require.True(t, acc.HasTag("sqlserver_database_io", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Managed_ServerProperties_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMIServerProperties"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_server_properties")) + require.True(t, acc.HasTag("sqlserver_server_properties", "sql_instance")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "cpu_count")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "server_memory")) + require.True(t, acc.HasTag("sqlserver_server_properties", "sku")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "engine_edition")) + require.True(t, acc.HasTag("sqlserver_server_properties", "hardware_type")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "total_storage_mb")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "available_storage_mb")) + require.True(t, acc.HasField("sqlserver_server_properties", "uptime")) // Time field. 
+ require.True(t, acc.HasTag("sqlserver_server_properties", "sql_version")) + require.True(t, acc.HasTag("sqlserver_server_properties", "sql_version_desc")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "db_online")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "db_restoring")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "db_recovering")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "db_recoveryPending")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "db_suspect")) + require.True(t, acc.HasTag("sqlserver_server_properties", "replica_updateability")) + + // This query should only return one row + require.Equal(t, 1, len(acc.Metrics)) + server.Stop() +} + +func TestAzureSQL_Managed_OsWaitStats_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMIOsWaitstats"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_waitstats")) + require.True(t, acc.HasTag("sqlserver_waitstats", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_waitstats", "wait_type")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "waiting_tasks_count")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "max_wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "signal_wait_time_ms")) + require.True(t, 
acc.HasInt64Field("sqlserver_waitstats", "resource_wait_ms")) + require.True(t, acc.HasTag("sqlserver_waitstats", "wait_category")) + require.True(t, acc.HasTag("sqlserver_waitstats", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Managed_MemoryClerks_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMIMemoryClerks"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_memory_clerks")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "clerk_type")) + require.True(t, acc.HasInt64Field("sqlserver_memory_clerks", "size_kb")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Managed_PerformanceCounters_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMIPerformanceCounters"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, 
acc.HasMeasurement("sqlserver_performance")) + require.True(t, acc.HasTag("sqlserver_performance", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_performance", "object")) + require.True(t, acc.HasTag("sqlserver_performance", "counter")) + require.True(t, acc.HasTag("sqlserver_performance", "instance")) + require.True(t, acc.HasFloatField("sqlserver_performance", "value")) + require.True(t, acc.HasTag("sqlserver_performance", "counter_type")) + require.True(t, acc.HasTag("sqlserver_performance", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Managed_Requests_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMIRequests"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_requests")) + require.True(t, acc.HasTag("sqlserver_requests", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_requests", "database_name")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "session_id")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "request_id")) + require.True(t, acc.HasTag("sqlserver_requests", "status")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "cpu_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "total_elapsed_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "logical_reads")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "writes")) + require.True(t, acc.HasTag("sqlserver_requests", "command")) + require.True(t, 
acc.HasInt64Field("sqlserver_requests", "wait_time_ms")) + require.True(t, acc.HasTag("sqlserver_requests", "wait_type")) + require.True(t, acc.HasTag("sqlserver_requests", "wait_resource")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "blocking_session_id")) + require.True(t, acc.HasTag("sqlserver_requests", "program_name")) + require.True(t, acc.HasTag("sqlserver_requests", "host_name")) + require.True(t, acc.HasTag("sqlserver_requests", "nt_user_name")) + require.True(t, acc.HasTag("sqlserver_requests", "login_name")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "open_transaction")) + require.True(t, acc.HasTag("sqlserver_requests", "transaction_isolation_level")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "granted_query_memory_pages")) + require.True(t, acc.HasFloatField("sqlserver_requests", "percent_complete")) + require.True(t, acc.HasTag("sqlserver_requests", "statement_text")) + require.True(t, acc.HasField("sqlserver_requests", "objectid")) // Can be null. + require.True(t, acc.HasField("sqlserver_requests", "stmt_object_name")) // Can be null. + require.True(t, acc.HasField("sqlserver_requests", "stmt_db_name")) // Can be null. 
+ require.True(t, acc.HasTag("sqlserver_requests", "query_hash")) + require.True(t, acc.HasTag("sqlserver_requests", "query_plan_hash")) + require.True(t, acc.HasTag("sqlserver_requests", "session_db_name")) + require.True(t, acc.HasTag("sqlserver_requests", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Managed_Schedulers_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMISchedulers"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_schedulers")) + require.True(t, acc.HasTag("sqlserver_schedulers", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_schedulers", "scheduler_id")) + require.True(t, acc.HasTag("sqlserver_schedulers", "cpu_id")) + require.True(t, acc.HasField("sqlserver_schedulers", "is_online")) // Bool field. + require.True(t, acc.HasField("sqlserver_schedulers", "is_idle")) // Bool field. 
+ require.True(t, acc.HasInt64Field("sqlserver_schedulers", "preemptive_switches_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "context_switches_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "current_tasks_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "runnable_tasks_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "current_workers_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "active_workers_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "work_queue_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "pending_disk_io_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "load_factor")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "yield_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "total_cpu_usage_ms")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "total_scheduler_delay_ms")) + require.True(t, acc.HasTag("sqlserver_schedulers", "replica_updateability")) + + server.Stop() +} From da11cd708e34fae2a4e3615ed355381d4e81ea6f Mon Sep 17 00:00:00 2001 From: Alan Pope Date: Mon, 25 Oct 2021 16:35:58 +0100 Subject: [PATCH 171/176] fix: Allow for non x86 macs in Go install script (#9982) --- scripts/installgo_mac.sh | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/scripts/installgo_mac.sh b/scripts/installgo_mac.sh index 4e7fb756161dc..f15aefa6a1641 100644 --- a/scripts/installgo_mac.sh +++ b/scripts/installgo_mac.sh @@ -2,12 +2,19 @@ set -eux -GO_ARCH="darwin-amd64" +ARCH=$(uname -m) GO_VERSION="1.17.2" -GO_VERSION_SHA="7914497a302a132a465d33f5ee044ce05568bacdb390ab805cb75a3435a23f94" # from https://golang.org/dl +if [ "$ARCH" = 'arm64' ]; then + GO_ARCH="darwin-arm64" + GO_VERSION_SHA="ce8771bd3edfb5b28104084b56bbb532eeb47fbb7769c3e664c6223712c30904" # from https://golang.org/dl +elif [ "$ARCH" = 'x86_64' ]; then + 
GO_ARCH="darwin-amd64" + GO_VERSION_SHA="7914497a302a132a465d33f5ee044ce05568bacdb390ab805cb75a3435a23f94" # from https://golang.org/dl +fi # This path is cachable. (Saving in /usr/local/ would cause issues restoring the cache.) path="/usr/local/Cellar" +sudo mkdir -p ${path} # Download Go and verify Go tarball. (Note: we aren't using brew because # it is slow to update and we can't pull specific minor versions.) @@ -21,8 +28,9 @@ setup_go () { sudo rm -rf ${path}/go sudo tar -C $path -xzf go${GO_VERSION}.${GO_ARCH}.tar.gz - ln -sf ${path}/go/bin/go /usr/local/bin/go - ln -sf ${path}/go/bin/gofmt /usr/local/bin/gofmt + sudo mkdir -p /usr/local/bin + sudo ln -sf ${path}/go/bin/go /usr/local/bin/go + sudo ln -sf ${path}/go/bin/gofmt /usr/local/bin/gofmt } if command -v go >/dev/null 2>&1; then From 90eeee78241b5d2a10d214030fc516079095767f Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Mon, 25 Oct 2021 08:36:16 -0700 Subject: [PATCH 172/176] chore: create bug report form (#9976) --- .github/ISSUE_TEMPLATE/BUG_REPORT.yml | 67 +++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/BUG_REPORT.yml diff --git a/.github/ISSUE_TEMPLATE/BUG_REPORT.yml b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml new file mode 100644 index 0000000000000..eb6187bc2f382 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml @@ -0,0 +1,67 @@ +name: Bug Report +description: File a bug report +title: "[Bug]: " +labels: ["bug"] +body: + - type: markdown + attributes: + value: | + Thanks for taking time to fill out this bug report! We reserve Telegraf issues for bugs for reproducible problems. + Please redirect any questions about Telegraf usage to our [Community Slack](https://influxdata.com/slack) or [Community Page](https://community.influxdata.com/) we have a lot of talented community members there who could help answer your question more quickly. 
+ - type: textarea + id: config + attributes: + label: Relevant telegraf.conf + description: Place config in the toml code section. This will be automatically formatted into toml, so no need for backticks. + render: toml + validations: + required: true + - type: input + id: system-info + attributes: + label: System info + description: Include Telegraf version, operating system, and other relevant details + placeholder: ex. Telegraf 1.20.0, Ubuntu 20.04, Docker 20.10.8 + validations: + required: true + - type: textarea + id: docker + attributes: + label: Docker + description: If your bug involves third party dependencies or services, it can be very helpful to provide a Dockerfile or docker-compose.yml that reproduces the environment you're testing against. + validations: + required: false + - type: textarea + id: reproduce + attributes: + label: Steps to reproduce + description: Describe the steps to reproduce the bug. + value: | + 1. + 2. + 3. + ... + validations: + required: true + - type: textarea + id: expected-behavior + attributes: + label: Expected behavior + description: Describe what you expected to happen when you performed the above steps. + validations: + required: true + - type: textarea + id: actual-behavior + attributes: + label: Actual behavior + description: Describe what actually happened when you performed the above steps. + validations: + required: true + - type: textarea + id: additional-info + attributes: + label: Additional info + description: Include gist of relevant config, logs, etc. 
+ validations: + required: false + From 85a7fbc13fa170b793c7e0297454c8d25f463322 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Mon, 25 Oct 2021 13:21:39 -0600 Subject: [PATCH 173/176] fix: procstat tags were not getting generated correctly (#9973) Fixes: #9961 --- plugins/inputs/procstat/procstat.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index cb10d34d2a5a9..09b5cc7cfa325 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -154,13 +154,18 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error { } } - tags := make(map[string]string) p.procs = newProcs for _, proc := range p.procs { - tags = proc.Tags() p.addMetric(proc, acc, now) } + tags := make(map[string]string) + for _, pidTag := range pidTags { + for key, value := range pidTag.Tags { + tags[key] = value + } + } + fields := map[string]interface{}{ "pid_count": pidCount, "running": len(p.procs), From 036ae299a5b81ee966d199c1ec401b09d23bbe71 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Mon, 25 Oct 2021 15:13:15 -0500 Subject: [PATCH 174/176] fix: update gjson to v1.10.2 (#9998) --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 09cae700f0c17..a7fd28cd6462e 100644 --- a/go.mod +++ b/go.mod @@ -251,9 +251,9 @@ require ( github.com/stretchr/testify v1.7.0 github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 github.com/testcontainers/testcontainers-go v0.11.1 - github.com/tidwall/gjson v1.9.0 - github.com/tidwall/match v1.0.3 // indirect - github.com/tidwall/pretty v1.1.0 // indirect + github.com/tidwall/gjson v1.10.2 + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.0 // indirect github.com/tinylib/msgp v1.1.6 github.com/tklauser/go-sysconf v0.3.9 // indirect github.com/tklauser/numcpus 
v0.3.0 // indirect diff --git a/go.sum b/go.sum index df4e48542c420..63dfa7dbed880 100644 --- a/go.sum +++ b/go.sum @@ -1996,13 +1996,13 @@ github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955u github.com/testcontainers/testcontainers-go v0.11.1 h1:FiYsB83LSGbiawoV8TpAZGfcCUbtaeeg1SXqEKUxh08= github.com/testcontainers/testcontainers-go v0.11.1/go.mod h1:/V0UVq+1e7NWYoqTPog179clf0Qp9TOyp4EcXaEFQz8= github.com/tetafro/godot v1.4.4/go.mod h1:FVDd4JuKliW3UgjswZfJfHq4vAx0bD/Jd5brJjGeaz4= -github.com/tidwall/gjson v1.9.0 h1:+Od7AE26jAaMgVC31cQV/Ope5iKXulNMflrlB7k+F9E= -github.com/tidwall/gjson v1.9.0/go.mod h1:5/xDoumyyDNerp2U36lyolv46b3uF/9Bu6OfyQ9GImk= -github.com/tidwall/match v1.0.3 h1:FQUVvBImDutD8wJLN6c5eMzWtjgONK9MwIBCOrUJKeE= -github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/gjson v1.10.2 h1:APbLGOM0rrEkd8WBw9C24nllro4ajFuJu0Sc9hRz8Bo= +github.com/tidwall/gjson v1.10.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tidwall/pretty v1.1.0 h1:K3hMW5epkdAVwibsQEfR/7Zj0Qgt4DxtNumTq/VloO8= -github.com/tidwall/pretty v1.1.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.6 h1:i+SbKraHhnrf9M5MYmvQhFnbLhAXSDWF8WWsuyRdocw= From ecd4d3782c4ad16baf546cfbf5f287fce234c4e1 Mon Sep 17 00:00:00 2001 From: Sam Lai 
<70988+slai@users.noreply.github.com> Date: Mon, 25 Oct 2021 22:01:35 +0100 Subject: [PATCH 175/176] fix(inputs/kube_inventory): don't skip resources with zero s/ns timestamps (#9978) --- plugins/inputs/kube_inventory/daemonset.go | 3 ++- plugins/inputs/kube_inventory/endpoint.go | 3 ++- plugins/inputs/kube_inventory/ingress.go | 3 ++- plugins/inputs/kube_inventory/pod.go | 3 ++- plugins/inputs/kube_inventory/pod_test.go | 2 +- plugins/inputs/kube_inventory/service.go | 3 ++- 6 files changed, 11 insertions(+), 6 deletions(-) diff --git a/plugins/inputs/kube_inventory/daemonset.go b/plugins/inputs/kube_inventory/daemonset.go index c365d169cacdb..e169c8f274662 100644 --- a/plugins/inputs/kube_inventory/daemonset.go +++ b/plugins/inputs/kube_inventory/daemonset.go @@ -40,7 +40,8 @@ func (ki *KubernetesInventory) gatherDaemonSet(d v1.DaemonSet, acc telegraf.Accu } } - if d.GetCreationTimestamp().Second() != 0 { + creationTs := d.GetCreationTimestamp() + if !creationTs.IsZero() { fields["created"] = d.GetCreationTimestamp().UnixNano() } diff --git a/plugins/inputs/kube_inventory/endpoint.go b/plugins/inputs/kube_inventory/endpoint.go index 89cbf6587bf0d..1eb86eea13b76 100644 --- a/plugins/inputs/kube_inventory/endpoint.go +++ b/plugins/inputs/kube_inventory/endpoint.go @@ -20,7 +20,8 @@ func collectEndpoints(ctx context.Context, acc telegraf.Accumulator, ki *Kuberne } func (ki *KubernetesInventory) gatherEndpoint(e corev1.Endpoints, acc telegraf.Accumulator) { - if e.GetCreationTimestamp().Second() == 0 && e.GetCreationTimestamp().Nanosecond() == 0 { + creationTs := e.GetCreationTimestamp() + if creationTs.IsZero() { return } diff --git a/plugins/inputs/kube_inventory/ingress.go b/plugins/inputs/kube_inventory/ingress.go index 6fd424dc0aef5..f8a966bc15a46 100644 --- a/plugins/inputs/kube_inventory/ingress.go +++ b/plugins/inputs/kube_inventory/ingress.go @@ -20,7 +20,8 @@ func collectIngress(ctx context.Context, acc telegraf.Accumulator, ki *Kubernete } func (ki 
*KubernetesInventory) gatherIngress(i netv1.Ingress, acc telegraf.Accumulator) { - if i.GetCreationTimestamp().Second() == 0 && i.GetCreationTimestamp().Nanosecond() == 0 { + creationTs := i.GetCreationTimestamp() + if creationTs.IsZero() { return } diff --git a/plugins/inputs/kube_inventory/pod.go b/plugins/inputs/kube_inventory/pod.go index ebd3ae43a7d4c..ab4e5dd287cbe 100644 --- a/plugins/inputs/kube_inventory/pod.go +++ b/plugins/inputs/kube_inventory/pod.go @@ -20,7 +20,8 @@ func collectPods(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesIn } func (ki *KubernetesInventory) gatherPod(p corev1.Pod, acc telegraf.Accumulator) { - if p.GetCreationTimestamp().Second() == 0 && p.GetCreationTimestamp().Nanosecond() == 0 { + creationTs := p.GetCreationTimestamp() + if creationTs.IsZero() { return } diff --git a/plugins/inputs/kube_inventory/pod_test.go b/plugins/inputs/kube_inventory/pod_test.go index 31600b1010d28..962805a67e3a3 100644 --- a/plugins/inputs/kube_inventory/pod_test.go +++ b/plugins/inputs/kube_inventory/pod_test.go @@ -20,7 +20,7 @@ func TestPod(t *testing.T) { selectExclude := []string{} now := time.Now() started := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-1, 1, 36, 0, now.Location()) - created := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-2, 1, 36, 0, now.Location()) + created := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-2, 1, 0, 0, now.Location()) cond1 := time.Date(now.Year(), 7, 5, 7, 53, 29, 0, now.Location()) cond2 := time.Date(now.Year(), 7, 5, 7, 53, 31, 0, now.Location()) diff --git a/plugins/inputs/kube_inventory/service.go b/plugins/inputs/kube_inventory/service.go index c2a7b7077e498..d589188605c85 100644 --- a/plugins/inputs/kube_inventory/service.go +++ b/plugins/inputs/kube_inventory/service.go @@ -20,7 +20,8 @@ func collectServices(ctx context.Context, acc telegraf.Accumulator, ki *Kubernet } func (ki *KubernetesInventory) gatherService(s corev1.Service, acc 
telegraf.Accumulator) { - if s.GetCreationTimestamp().Second() == 0 && s.GetCreationTimestamp().Nanosecond() == 0 { + creationTs := s.GetCreationTimestamp() + if creationTs.IsZero() { return } From 77248978c748a43f55a25f7f722ca7338fb192e6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Tue, 26 Oct 2021 15:45:03 +0200 Subject: [PATCH 176/176] fix: Linter fixes for plugins/inputs/[h-j]* (#9986) --- plugins/inputs/haproxy/haproxy_test.go | 5 +- plugins/inputs/hddtemp/hddtemp_test.go | 9 +-- plugins/inputs/http/http_test.go | 63 +++++++++--------- .../http_listener_v2/http_listener_v2_test.go | 4 +- plugins/inputs/icinga2/icinga2.go | 12 ++-- .../influxdb_listener_test.go | 5 +- .../influxdb_v2_listener_test.go | 4 +- plugins/inputs/interrupts/interrupts.go | 28 +++++--- plugins/inputs/ipmi_sensor/ipmi.go | 23 +++---- plugins/inputs/ipmi_sensor/ipmi_test.go | 65 +++++++++++++------ plugins/inputs/jenkins/jenkins_test.go | 8 +-- plugins/inputs/jolokia2/client.go | 4 +- plugins/inputs/jolokia2/gatherer.go | 2 +- plugins/inputs/jolokia2/jolokia_agent.go | 6 +- plugins/inputs/jolokia2/jolokia_proxy.go | 4 +- plugins/inputs/jolokia2/jolokia_test.go | 11 ++-- .../openconfig_telemetry.go | 13 ++-- 17 files changed, 155 insertions(+), 111 deletions(-) diff --git a/plugins/inputs/haproxy/haproxy_test.go b/plugins/inputs/haproxy/haproxy_test.go index c5c06e930c15c..21a1b09c10d02 100644 --- a/plugins/inputs/haproxy/haproxy_test.go +++ b/plugins/inputs/haproxy/haproxy_test.go @@ -12,8 +12,9 @@ import ( "strings" "testing" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) type statServer struct{} @@ -134,7 +135,7 @@ func TestHaproxyGeneratesMetricsUsingSocket(t *testing.T) { } sockets[i] = sock - defer sock.Close() + defer sock.Close() //nolint:revive // done on purpose, closing will be executed properly s := statServer{} go s.serverSocket(sock) diff --git 
a/plugins/inputs/hddtemp/hddtemp_test.go b/plugins/inputs/hddtemp/hddtemp_test.go index 79fceb72e8129..769022049d17a 100644 --- a/plugins/inputs/hddtemp/hddtemp_test.go +++ b/plugins/inputs/hddtemp/hddtemp_test.go @@ -3,10 +3,11 @@ package hddtemp import ( "testing" - hddtemp "github.com/influxdata/telegraf/plugins/inputs/hddtemp/go-hddtemp" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/plugins/inputs/hddtemp/go-hddtemp" + "github.com/influxdata/telegraf/testutil" ) type mockFetcher struct { @@ -33,14 +34,14 @@ func newMockFetcher() *mockFetcher { } func TestFetch(t *testing.T) { - hddtemp := &HDDTemp{ + hddTemp := &HDDTemp{ fetcher: newMockFetcher(), Address: "localhost", Devices: []string{"*"}, } acc := &testutil.Accumulator{} - err := hddtemp.Gather(acc) + err := hddTemp.Gather(acc) require.NoError(t, err) assert.Equal(t, acc.NFields(), 2) diff --git a/plugins/inputs/http/http_test.go b/plugins/inputs/http/http_test.go index da9fed2251514..c485167205708 100644 --- a/plugins/inputs/http/http_test.go +++ b/plugins/inputs/http/http_test.go @@ -9,15 +9,16 @@ import ( "net/url" "testing" + "github.com/stretchr/testify/require" + httpconfig "github.com/influxdata/telegraf/plugins/common/http" - oauth "github.com/influxdata/telegraf/plugins/common/oauth" - plugin "github.com/influxdata/telegraf/plugins/inputs/http" + "github.com/influxdata/telegraf/plugins/common/oauth" + httpplugin "github.com/influxdata/telegraf/plugins/inputs/http" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) -func TestHTTPwithJSONFormat(t *testing.T) { +func TestHTTPWithJSONFormat(t *testing.T) { fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/endpoint" { _, _ = w.Write([]byte(simpleJSON)) @@ -27,9 +28,9 @@ func TestHTTPwithJSONFormat(t 
*testing.T) { })) defer fakeServer.Close() - url := fakeServer.URL + "/endpoint" - plugin := &plugin.HTTP{ - URLs: []string{url}, + address := fakeServer.URL + "/endpoint" + plugin := &httpplugin.HTTP{ + URLs: []string{address}, } metricName := "metricName" @@ -50,7 +51,7 @@ func TestHTTPwithJSONFormat(t *testing.T) { require.Equal(t, metric.Measurement, metricName) require.Len(t, acc.Metrics[0].Fields, 1) require.Equal(t, acc.Metrics[0].Fields["a"], 1.2) - require.Equal(t, acc.Metrics[0].Tags["url"], url) + require.Equal(t, acc.Metrics[0].Tags["url"], address) } func TestHTTPHeaders(t *testing.T) { @@ -69,9 +70,9 @@ func TestHTTPHeaders(t *testing.T) { })) defer fakeServer.Close() - url := fakeServer.URL + "/endpoint" - plugin := &plugin.HTTP{ - URLs: []string{url}, + address := fakeServer.URL + "/endpoint" + plugin := &httpplugin.HTTP{ + URLs: []string{address}, Headers: map[string]string{header: headerValue}, } @@ -92,9 +93,9 @@ func TestInvalidStatusCode(t *testing.T) { })) defer fakeServer.Close() - url := fakeServer.URL + "/endpoint" - plugin := &plugin.HTTP{ - URLs: []string{url}, + address := fakeServer.URL + "/endpoint" + plugin := &httpplugin.HTTP{ + URLs: []string{address}, } metricName := "metricName" @@ -115,9 +116,9 @@ func TestSuccessStatusCodes(t *testing.T) { })) defer fakeServer.Close() - url := fakeServer.URL + "/endpoint" - plugin := &plugin.HTTP{ - URLs: []string{url}, + address := fakeServer.URL + "/endpoint" + plugin := &httpplugin.HTTP{ + URLs: []string{address}, SuccessStatusCodes: []int{200, 202}, } @@ -143,7 +144,7 @@ func TestMethod(t *testing.T) { })) defer fakeServer.Close() - plugin := &plugin.HTTP{ + plugin := &httpplugin.HTTP{ URLs: []string{fakeServer.URL}, Method: "POST", } @@ -169,18 +170,18 @@ func TestBodyAndContentEncoding(t *testing.T) { ts := httptest.NewServer(http.NotFoundHandler()) defer ts.Close() - url := fmt.Sprintf("http://%s", ts.Listener.Addr().String()) + address := fmt.Sprintf("http://%s", 
ts.Listener.Addr().String()) tests := []struct { name string - plugin *plugin.HTTP + plugin *httpplugin.HTTP queryHandlerFunc func(t *testing.T, w http.ResponseWriter, r *http.Request) }{ { name: "no body", - plugin: &plugin.HTTP{ + plugin: &httpplugin.HTTP{ Method: "POST", - URLs: []string{url}, + URLs: []string{address}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { body, err := io.ReadAll(r.Body) @@ -191,8 +192,8 @@ func TestBodyAndContentEncoding(t *testing.T) { }, { name: "post body", - plugin: &plugin.HTTP{ - URLs: []string{url}, + plugin: &httpplugin.HTTP{ + URLs: []string{address}, Method: "POST", Body: "test", }, @@ -205,8 +206,8 @@ func TestBodyAndContentEncoding(t *testing.T) { }, { name: "get method body is sent", - plugin: &plugin.HTTP{ - URLs: []string{url}, + plugin: &httpplugin.HTTP{ + URLs: []string{address}, Method: "GET", Body: "test", }, @@ -219,8 +220,8 @@ func TestBodyAndContentEncoding(t *testing.T) { }, { name: "gzip encoding", - plugin: &plugin.HTTP{ - URLs: []string{url}, + plugin: &httpplugin.HTTP{ + URLs: []string{address}, Method: "GET", Body: "test", ContentEncoding: "gzip", @@ -269,13 +270,13 @@ func TestOAuthClientCredentialsGrant(t *testing.T) { tests := []struct { name string - plugin *plugin.HTTP + plugin *httpplugin.HTTP tokenHandler TestHandlerFunc handler TestHandlerFunc }{ { name: "no credentials", - plugin: &plugin.HTTP{ + plugin: &httpplugin.HTTP{ URLs: []string{u.String()}, }, handler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { @@ -285,7 +286,7 @@ func TestOAuthClientCredentialsGrant(t *testing.T) { }, { name: "success", - plugin: &plugin.HTTP{ + plugin: &httpplugin.HTTP{ URLs: []string{u.String() + "/write"}, HTTPClientConfig: httpconfig.HTTPClientConfig{ OAuth2Config: oauth.OAuth2Config{ diff --git a/plugins/inputs/http_listener_v2/http_listener_v2_test.go b/plugins/inputs/http_listener_v2/http_listener_v2_test.go index da70f443998e1..bf320d6f05174 100644 --- 
a/plugins/inputs/http_listener_v2/http_listener_v2_test.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2_test.go @@ -14,10 +14,11 @@ import ( "time" "github.com/golang/snappy" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) const ( @@ -371,6 +372,7 @@ func TestWriteHTTPGzippedData(t *testing.T) { client := &http.Client{} resp, err := client.Do(req) require.NoError(t, err) + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) hostTags := []string{"server02", "server03", diff --git a/plugins/inputs/icinga2/icinga2.go b/plugins/inputs/icinga2/icinga2.go index 17069e169c81b..f56192a7a9282 100644 --- a/plugins/inputs/icinga2/icinga2.go +++ b/plugins/inputs/icinga2/icinga2.go @@ -82,7 +82,7 @@ func (i *Icinga2) SampleConfig() string { func (i *Icinga2) GatherStatus(acc telegraf.Accumulator, checks []Object) { for _, check := range checks { - url, err := url.Parse(i.Server) + serverURL, err := url.Parse(i.Server) if err != nil { i.Log.Error(err.Error()) continue @@ -106,9 +106,9 @@ func (i *Icinga2) GatherStatus(acc telegraf.Accumulator, checks []Object) { "check_command": check.Attrs.CheckCommand, "source": source, "state": levels[state], - "server": url.Hostname(), - "scheme": url.Scheme, - "port": url.Port(), + "server": serverURL.Hostname(), + "scheme": serverURL.Scheme, + "port": serverURL.Port(), } acc.AddFields(fmt.Sprintf("icinga2_%s", i.ObjectType), fields, tags) @@ -152,9 +152,9 @@ func (i *Icinga2) Gather(acc telegraf.Accumulator) error { requestURL += "&attrs=host_name" } - url := fmt.Sprintf(requestURL, i.Server, i.ObjectType) + address := fmt.Sprintf(requestURL, i.Server, i.ObjectType) - req, err := http.NewRequest("GET", url, nil) + req, err := http.NewRequest("GET", address, nil) if err != nil { return err } diff --git 
a/plugins/inputs/influxdb_listener/influxdb_listener_test.go b/plugins/inputs/influxdb_listener/influxdb_listener_test.go index 6b88907f95801..36952f6851064 100644 --- a/plugins/inputs/influxdb_listener/influxdb_listener_test.go +++ b/plugins/inputs/influxdb_listener/influxdb_listener_test.go @@ -13,10 +13,11 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) const ( @@ -416,6 +417,7 @@ func TestWriteGzippedData(t *testing.T) { client := &http.Client{} resp, err := client.Do(req) require.NoError(t, err) + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) hostTags := []string{"server02", "server03", @@ -526,6 +528,7 @@ func TestQuery(t *testing.T) { resp, err := http.Post( createURL(listener, "http", "/query", "db=&q=CREATE+DATABASE+IF+NOT+EXISTS+%22mydb%22"), "", nil) require.NoError(t, err) + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 200, resp.StatusCode) } diff --git a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go index 055dfc395ba7b..4338f34f89567 100644 --- a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go +++ b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go @@ -15,9 +15,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) const ( @@ -374,6 +375,7 @@ func TestWriteGzippedData(t *testing.T) { client := &http.Client{} resp, err := client.Do(req) require.NoError(t, err) + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) hostTags := []string{"server02", "server03", diff --git a/plugins/inputs/interrupts/interrupts.go 
b/plugins/inputs/interrupts/interrupts.go index 6d68818a8f055..d9e9dd287361c 100644 --- a/plugins/inputs/interrupts/interrupts.go +++ b/plugins/inputs/interrupts/interrupts.go @@ -57,7 +57,7 @@ func parseInterrupts(r io.Reader) ([]IRQ, error) { if scanner.Scan() { cpus := strings.Fields(scanner.Text()) if cpus[0] != "CPU0" { - return nil, fmt.Errorf("Expected first line to start with CPU0, but was %s", scanner.Text()) + return nil, fmt.Errorf("expected first line to start with CPU0, but was %s", scanner.Text()) } cpucount = len(cpus) } @@ -93,7 +93,7 @@ scan: irqs = append(irqs, *irq) } if scanner.Err() != nil { - return nil, fmt.Errorf("Error scanning file: %s", scanner.Err()) + return nil, fmt.Errorf("error scanning file: %s", scanner.Err()) } return irqs, nil } @@ -110,15 +110,9 @@ func gatherTagsFields(irq IRQ) (map[string]string, map[string]interface{}) { func (s *Interrupts) Gather(acc telegraf.Accumulator) error { for measurement, file := range map[string]string{"interrupts": "/proc/interrupts", "soft_interrupts": "/proc/softirqs"} { - f, err := os.Open(file) + irqs, err := parseFile(file) if err != nil { - acc.AddError(fmt.Errorf("Could not open file: %s", file)) - continue - } - defer f.Close() - irqs, err := parseInterrupts(f) - if err != nil { - acc.AddError(fmt.Errorf("Parsing %s: %s", file, err)) + acc.AddError(err) continue } reportMetrics(measurement, irqs, acc, s.CPUAsTag) @@ -126,6 +120,20 @@ func (s *Interrupts) Gather(acc telegraf.Accumulator) error { return nil } +func parseFile(file string) ([]IRQ, error) { + f, err := os.Open(file) + if err != nil { + return nil, fmt.Errorf("could not open file: %s", file) + } + defer f.Close() + + irqs, err := parseInterrupts(f) + if err != nil { + return nil, fmt.Errorf("parsing %s: %s", file, err) + } + return irqs, nil +} + func reportMetrics(measurement string, irqs []IRQ, acc telegraf.Accumulator, cpusAsTags bool) { for _, irq := range irqs { tags, fields := gatherTagsFields(irq) diff --git 
a/plugins/inputs/ipmi_sensor/ipmi.go b/plugins/inputs/ipmi_sensor/ipmi.go index c7f23dbc30e36..d26e739e96d43 100644 --- a/plugins/inputs/ipmi_sensor/ipmi.go +++ b/plugins/inputs/ipmi_sensor/ipmi.go @@ -4,7 +4,6 @@ import ( "bufio" "bytes" "fmt" - "log" "os" "os/exec" "path/filepath" @@ -39,6 +38,8 @@ type Ipmi struct { UseSudo bool UseCache bool CachePath string + + Log telegraf.Logger `toml:"-"` } var sampleConfig = ` @@ -172,17 +173,17 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error { return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out)) } if m.MetricVersion == 2 { - return parseV2(acc, hostname, out, timestamp) + return m.parseV2(acc, hostname, out, timestamp) } - return parseV1(acc, hostname, out, timestamp) + return m.parseV1(acc, hostname, out, timestamp) } -func parseV1(acc telegraf.Accumulator, hostname string, cmdOut []byte, measuredAt time.Time) error { +func (m *Ipmi) parseV1(acc telegraf.Accumulator, hostname string, cmdOut []byte, measuredAt time.Time) error { // each line will look something like // Planar VBAT | 3.05 Volts | ok scanner := bufio.NewScanner(bytes.NewReader(cmdOut)) for scanner.Scan() { - ipmiFields := extractFieldsFromRegex(reV1ParseLine, scanner.Text()) + ipmiFields := m.extractFieldsFromRegex(reV1ParseLine, scanner.Text()) if len(ipmiFields) != 3 { continue } @@ -234,14 +235,14 @@ func parseV1(acc telegraf.Accumulator, hostname string, cmdOut []byte, measuredA return scanner.Err() } -func parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measuredAt time.Time) error { +func (m *Ipmi) parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measuredAt time.Time) error { // each line will look something like // CMOS Battery | 65h | ok | 7.1 | // Temp | 0Eh | ok | 3.1 | 55 degrees C // Drive 0 | A0h | ok | 7.1 | Drive Present scanner := bufio.NewScanner(bytes.NewReader(cmdOut)) for scanner.Scan() { - ipmiFields := 
extractFieldsFromRegex(reV2ParseLine, scanner.Text()) + ipmiFields := m.extractFieldsFromRegex(reV2ParseLine, scanner.Text()) if len(ipmiFields) < 3 || len(ipmiFields) > 4 { continue } @@ -257,7 +258,7 @@ func parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measuredA tags["entity_id"] = transform(ipmiFields["entity_id"]) tags["status_code"] = trim(ipmiFields["status_code"]) fields := make(map[string]interface{}) - descriptionResults := extractFieldsFromRegex(reV2ParseDescription, trim(ipmiFields["description"])) + descriptionResults := m.extractFieldsFromRegex(reV2ParseDescription, trim(ipmiFields["description"])) // This is an analog value with a unit if descriptionResults["analogValue"] != "" && len(descriptionResults["analogUnit"]) >= 1 { var err error @@ -266,7 +267,7 @@ func parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measuredA continue } // Some implementations add an extra status to their analog units - unitResults := extractFieldsFromRegex(reV2ParseUnit, descriptionResults["analogUnit"]) + unitResults := m.extractFieldsFromRegex(reV2ParseUnit, descriptionResults["analogUnit"]) tags["unit"] = transform(unitResults["realAnalogUnit"]) if unitResults["statusDesc"] != "" { tags["status_desc"] = transform(unitResults["statusDesc"]) @@ -289,12 +290,12 @@ func parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measuredA } // extractFieldsFromRegex consumes a regex with named capture groups and returns a kvp map of strings with the results -func extractFieldsFromRegex(re *regexp.Regexp, input string) map[string]string { +func (m *Ipmi) extractFieldsFromRegex(re *regexp.Regexp, input string) map[string]string { submatches := re.FindStringSubmatch(input) results := make(map[string]string) subexpNames := re.SubexpNames() if len(subexpNames) > len(submatches) { - log.Printf("D! 
No matches found in '%s'", input) + m.Log.Debugf("No matches found in '%s'", input) return results } for i, name := range subexpNames { diff --git a/plugins/inputs/ipmi_sensor/ipmi_test.go b/plugins/inputs/ipmi_sensor/ipmi_test.go index cf53214dbbd66..4a2910101ab82 100644 --- a/plugins/inputs/ipmi_sensor/ipmi_test.go +++ b/plugins/inputs/ipmi_sensor/ipmi_test.go @@ -7,10 +7,11 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func TestGather(t *testing.T) { @@ -20,6 +21,7 @@ func TestGather(t *testing.T) { Privilege: "USER", Timeout: config.Duration(time.Second * 5), HexKey: "1234567F", + Log: testutil.Logger{}, } // overwriting exec commands with mock commands @@ -44,7 +46,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(20), - "status": int(1), + "status": 1, }, map[string]string{ "name": "ambient_temp", @@ -55,7 +57,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(80), - "status": int(1), + "status": 1, }, map[string]string{ "name": "altitude", @@ -66,7 +68,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(210), - "status": int(1), + "status": 1, }, map[string]string{ "name": "avg_power", @@ -77,7 +79,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(4.9), - "status": int(1), + "status": 1, }, map[string]string{ "name": "planar_5v", @@ -88,7 +90,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(3.05), - "status": int(1), + "status": 1, }, map[string]string{ "name": "planar_vbat", @@ -99,7 +101,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(2610), - "status": int(1), + "status": 1, }, map[string]string{ "name": "fan_1a_tach", @@ -110,7 +112,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ 
"value": float64(1775), - "status": int(1), + "status": 1, }, map[string]string{ "name": "fan_1b_tach", @@ -127,6 +129,7 @@ func TestGather(t *testing.T) { i = &Ipmi{ Path: "ipmitool", Timeout: config.Duration(time.Second * 5), + Log: testutil.Logger{}, } err = acc.GatherError(i.Gather) @@ -139,7 +142,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(20), - "status": int(1), + "status": 1, }, map[string]string{ "name": "ambient_temp", @@ -149,7 +152,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(80), - "status": int(1), + "status": 1, }, map[string]string{ "name": "altitude", @@ -159,7 +162,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(210), - "status": int(1), + "status": 1, }, map[string]string{ "name": "avg_power", @@ -169,7 +172,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(4.9), - "status": int(1), + "status": 1, }, map[string]string{ "name": "planar_5v", @@ -179,7 +182,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(3.05), - "status": int(1), + "status": 1, }, map[string]string{ "name": "planar_vbat", @@ -189,7 +192,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(2610), - "status": int(1), + "status": 1, }, map[string]string{ "name": "fan_1a_tach", @@ -199,7 +202,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(1775), - "status": int(1), + "status": 1, }, map[string]string{ "name": "fan_1b_tach", @@ -371,7 +374,7 @@ OS RealTime Mod | 0x00 | ok // Previous arguments are tests stuff, that looks like : // /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess -- - cmd, args := args[3], args[4:] + cmd := args[3] // Ignore the returned errors for the mocked interface as tests will fail anyway if cmd == "ipmitool" { @@ -380,8 +383,10 @@ OS RealTime Mod | 0x00 | ok } else { //nolint:errcheck,revive fmt.Fprint(os.Stdout, 
"command not found") + //nolint:revive // error code is important for this "test" os.Exit(1) } + //nolint:revive // error code is important for this "test" os.Exit(0) } @@ -393,6 +398,7 @@ func TestGatherV2(t *testing.T) { Timeout: config.Duration(time.Second * 5), MetricVersion: 2, HexKey: "0000000F", + Log: testutil.Logger{}, } // overwriting exec commands with mock commands execCommand = fakeExecCommandV2 @@ -434,6 +440,7 @@ func TestGatherV2(t *testing.T) { Path: "ipmitool", Timeout: config.Duration(time.Second * 5), MetricVersion: 2, + Log: testutil.Logger{}, } err = acc.GatherError(i.Gather) @@ -568,7 +575,7 @@ Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected // Previous arguments are tests stuff, that looks like : // /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess -- - cmd, args := args[3], args[4:] + cmd := args[3] // Ignore the returned errors for the mocked interface as tests will fail anyway if cmd == "ipmitool" { @@ -577,8 +584,10 @@ Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected } else { //nolint:errcheck,revive fmt.Fprint(os.Stdout, "command not found") + //nolint:revive // error code is important for this "test" os.Exit(1) } + //nolint:revive // error code is important for this "test" os.Exit(0) } @@ -613,10 +622,14 @@ Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected v2Data, } + ipmi := &Ipmi{ + Log: testutil.Logger{}, + } + for i := range tests { t.Logf("Checking v%d data...", i+1) - extractFieldsFromRegex(reV1ParseLine, tests[i]) - extractFieldsFromRegex(reV2ParseLine, tests[i]) + ipmi.extractFieldsFromRegex(reV1ParseLine, tests[i]) + ipmi.extractFieldsFromRegex(reV2ParseLine, tests[i]) } } @@ -653,11 +666,16 @@ func Test_parseV1(t *testing.T) { wantErr: false, }, } + + ipmi := &Ipmi{ + Log: testutil.Logger{}, + } + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var acc testutil.Accumulator - if err := parseV1(&acc, tt.args.hostname, tt.args.cmdOut, 
tt.args.measuredAt); (err != nil) != tt.wantErr { + if err := ipmi.parseV1(&acc, tt.args.hostname, tt.args.cmdOut, tt.args.measuredAt); (err != nil) != tt.wantErr { t.Errorf("parseV1() error = %v, wantErr %v", err, tt.wantErr) } @@ -746,10 +764,15 @@ func Test_parseV2(t *testing.T) { wantErr: false, }, } + + ipmi := &Ipmi{ + Log: testutil.Logger{}, + } + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var acc testutil.Accumulator - if err := parseV2(&acc, tt.args.hostname, tt.args.cmdOut, tt.args.measuredAt); (err != nil) != tt.wantErr { + if err := ipmi.parseV2(&acc, tt.args.hostname, tt.args.cmdOut, tt.args.measuredAt); (err != nil) != tt.wantErr { t.Errorf("parseV2() error = %v, wantErr %v", err, tt.wantErr) } testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) diff --git a/plugins/inputs/jenkins/jenkins_test.go b/plugins/inputs/jenkins/jenkins_test.go index 2b74d654a6d2d..e5f09ad66d1ca 100644 --- a/plugins/inputs/jenkins/jenkins_test.go +++ b/plugins/inputs/jenkins/jenkins_test.go @@ -44,13 +44,13 @@ func TestJobRequest(t *testing.T) { } for _, test := range tests { hierarchyName := test.input.hierarchyName() - URL := test.input.URL() + address := test.input.URL() if hierarchyName != test.hierarchyName { t.Errorf("Expected %s, got %s\n", test.hierarchyName, hierarchyName) } - if test.URL != "" && URL != test.URL { - t.Errorf("Expected %s, got %s\n", test.URL, URL) + if test.URL != "" && address != test.URL { + t.Errorf("Expected %s, got %s\n", test.URL, address) } } } @@ -429,7 +429,7 @@ func TestInitialize(t *testing.T) { } if test.output != nil { if test.input.client == nil { - t.Fatalf("%s: failed %s, jenkins instance shouldn't be nil", test.name, te.Error()) + t.Fatalf("%s: failed %v, jenkins instance shouldn't be nil", test.name, te) } if test.input.MaxConnections != test.output.MaxConnections { t.Fatalf("%s: different MaxConnections Expected %d, got %d\n", test.name, test.output.MaxConnections, 
test.input.MaxConnections) diff --git a/plugins/inputs/jolokia2/client.go b/plugins/inputs/jolokia2/client.go index 789450e3a1016..e3b42f660dff6 100644 --- a/plugins/inputs/jolokia2/client.go +++ b/plugins/inputs/jolokia2/client.go @@ -95,7 +95,7 @@ type jolokiaResponse struct { Status int `json:"status"` } -func NewClient(url string, config *ClientConfig) (*Client, error) { +func NewClient(address string, config *ClientConfig) (*Client, error) { tlsConfig, err := config.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -112,7 +112,7 @@ func NewClient(url string, config *ClientConfig) (*Client, error) { } return &Client{ - URL: url, + URL: address, config: config, client: client, }, nil diff --git a/plugins/inputs/jolokia2/gatherer.go b/plugins/inputs/jolokia2/gatherer.go index 99cd2f4b91a13..1dfdc057e832b 100644 --- a/plugins/inputs/jolokia2/gatherer.go +++ b/plugins/inputs/jolokia2/gatherer.go @@ -80,7 +80,7 @@ func (g *Gatherer) generatePoints(metric Metric, responses []ReadResponse) ([]po for _, response := range responses { switch response.Status { case 200: - break + // Correct response status - do nothing. 
case 404: continue default: diff --git a/plugins/inputs/jolokia2/jolokia_agent.go b/plugins/inputs/jolokia2/jolokia_agent.go index 5b2e3da37c16e..23336dd6f4351 100644 --- a/plugins/inputs/jolokia2/jolokia_agent.go +++ b/plugins/inputs/jolokia2/jolokia_agent.go @@ -68,7 +68,7 @@ func (ja *JolokiaAgent) Gather(acc telegraf.Accumulator) error { for _, url := range ja.URLs { client, err := ja.createClient(url) if err != nil { - acc.AddError(fmt.Errorf("Unable to create client for %s: %v", url, err)) + acc.AddError(fmt.Errorf("unable to create client for %s: %v", url, err)) continue } ja.clients = append(ja.clients, client) @@ -97,8 +97,8 @@ func (ja *JolokiaAgent) Gather(acc telegraf.Accumulator) error { func (ja *JolokiaAgent) createMetrics() []Metric { var metrics []Metric - for _, config := range ja.Metrics { - metrics = append(metrics, NewMetric(config, + for _, metricConfig := range ja.Metrics { + metrics = append(metrics, NewMetric(metricConfig, ja.DefaultFieldPrefix, ja.DefaultFieldSeparator, ja.DefaultTagPrefix)) } diff --git a/plugins/inputs/jolokia2/jolokia_proxy.go b/plugins/inputs/jolokia2/jolokia_proxy.go index 1f91e1cb911fe..8654c9308762c 100644 --- a/plugins/inputs/jolokia2/jolokia_proxy.go +++ b/plugins/inputs/jolokia2/jolokia_proxy.go @@ -93,8 +93,8 @@ func (jp *JolokiaProxy) Gather(acc telegraf.Accumulator) error { func (jp *JolokiaProxy) createMetrics() []Metric { var metrics []Metric - for _, config := range jp.Metrics { - metrics = append(metrics, NewMetric(config, + for _, metricConfig := range jp.Metrics { + metrics = append(metrics, NewMetric(metricConfig, jp.DefaultFieldPrefix, jp.DefaultFieldSeparator, jp.DefaultTagPrefix)) } diff --git a/plugins/inputs/jolokia2/jolokia_test.go b/plugins/inputs/jolokia2/jolokia_test.go index eddcebfce0892..01750bf002ff5 100644 --- a/plugins/inputs/jolokia2/jolokia_test.go +++ b/plugins/inputs/jolokia2/jolokia_test.go @@ -6,11 +6,12 @@ import ( "net/http/httptest" "testing" + 
"github.com/stretchr/testify/assert" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" "github.com/influxdata/toml" "github.com/influxdata/toml/ast" - "github.com/stretchr/testify/assert" ) func TestJolokia2_ScalarValues(t *testing.T) { @@ -749,15 +750,15 @@ func TestJolokia2_ProxyTargets(t *testing.T) { } func TestFillFields(t *testing.T) { - complex := map[string]interface{}{"Value": []interface{}{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} - scalar := []interface{}{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + complexPoint := map[string]interface{}{"Value": []interface{}{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + scalarPoint := []interface{}{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} results := map[string]interface{}{} - newPointBuilder(Metric{Name: "test", Mbean: "complex"}, []string{"this", "that"}, "/").fillFields("", complex, results) + newPointBuilder(Metric{Name: "test", Mbean: "complex"}, []string{"this", "that"}, "/").fillFields("", complexPoint, results) assert.Equal(t, map[string]interface{}{}, results) results = map[string]interface{}{} - newPointBuilder(Metric{Name: "test", Mbean: "scalar"}, []string{"this", "that"}, "/").fillFields("", scalar, results) + newPointBuilder(Metric{Name: "test", Mbean: "scalar"}, []string{"this", "that"}, "/").fillFields("", scalarPoint, results) assert.Equal(t, map[string]interface{}{}, results) } diff --git a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go index 96dce5a88c7e7..b95930cd42f87 100644 --- a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go +++ b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go @@ -8,17 +8,18 @@ import ( "sync" "time" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/status" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" internaltls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry/auth" "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry/oc" - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - 
"google.golang.org/grpc/credentials" - "google.golang.org/grpc/status" ) type OpenConfigTelemetry struct { @@ -42,7 +43,7 @@ type OpenConfigTelemetry struct { var ( // Regex to match and extract data points from path value in received key - keyPathRegex = regexp.MustCompile("\\/([^\\/]*)\\[([A-Za-z0-9\\-\\/]*\\=[^\\[]*)\\]") + keyPathRegex = regexp.MustCompile(`/([^/]*)\[([A-Za-z0-9\-/]*=[^\[]*)]`) sampleConfig = ` ## List of device addresses to collect telemetry from servers = ["localhost:1883"]