From c4050aed948dd65ed33a2b9d2125712c8159901f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 16 Sep 2021 12:01:34 -0400 Subject: [PATCH 01/81] fix: bump github.com/antchfx/xmlquery from 1.3.5 to 1.3.6 (#9750) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 4dcb4550d4764..24b6f664b2756 100644 --- a/go.mod +++ b/go.mod @@ -39,7 +39,7 @@ require ( github.com/aliyun/alibaba-cloud-sdk-go v1.61.1004 github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 github.com/antchfx/jsonquery v1.1.4 - github.com/antchfx/xmlquery v1.3.5 + github.com/antchfx/xmlquery v1.3.6 github.com/antchfx/xpath v1.1.11 github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230 // indirect github.com/apache/thrift v0.14.2 diff --git a/go.sum b/go.sum index c5a0778443420..e09c7b36318ff 100644 --- a/go.sum +++ b/go.sum @@ -219,8 +219,8 @@ github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RD github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/antchfx/jsonquery v1.1.4 h1:+OlFO3QS9wjU0MKx9MgHm5f6o6hdd4e9mUTp0wTjxlM= github.com/antchfx/jsonquery v1.1.4/go.mod h1:cHs8r6Bymd8j6HI6Ej1IJbjahKvLBcIEh54dfmo+E9A= -github.com/antchfx/xmlquery v1.3.5 h1:I7TuBRqsnfFuL11ruavGm911Awx9IqSdiU6W/ztSmVw= -github.com/antchfx/xmlquery v1.3.5/go.mod h1:64w0Xesg2sTaawIdNqMB+7qaW/bSqkQm+ssPaCMWNnc= +github.com/antchfx/xmlquery v1.3.6 h1:kaEVzH1mNo/2AJZrhZjAaAUTy2Nn2zxGfYYU8jWfXOo= +github.com/antchfx/xmlquery v1.3.6/go.mod h1:64w0Xesg2sTaawIdNqMB+7qaW/bSqkQm+ssPaCMWNnc= github.com/antchfx/xpath v1.1.7/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk= github.com/antchfx/xpath v1.1.10/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk= github.com/antchfx/xpath v1.1.11 h1:WOFtK8TVAjLm3lbgqeP0arlHpvCEeTANeWZ/csPpJkQ= From a02f49c6ff5b43955e117f0a1290f2c4b6543d45 Mon Sep 17 
00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 16 Sep 2021 10:02:47 -0600 Subject: [PATCH 02/81] fix: bump github.com/miekg/dns from 1.1.31 to 1.1.43 (#9656) --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 24b6f664b2756..65a894974bea6 100644 --- a/go.mod +++ b/go.mod @@ -180,7 +180,7 @@ require ( github.com/mdlayher/genetlink v1.0.0 // indirect github.com/mdlayher/netlink v1.1.0 // indirect github.com/microsoft/ApplicationInsights-Go v0.4.4 - github.com/miekg/dns v1.1.31 + github.com/miekg/dns v1.1.43 github.com/minio/highwayhash v1.0.1 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.2.2 // indirect diff --git a/go.sum b/go.sum index e09c7b36318ff..5ff7799dc902b 100644 --- a/go.sum +++ b/go.sum @@ -1146,8 +1146,9 @@ github.com/microsoft/ApplicationInsights-Go v0.4.4 h1:G4+H9WNs6ygSCe6sUyxRc2U81T github.com/microsoft/ApplicationInsights-Go v0.4.4/go.mod h1:fKRUseBqkw6bDiXTs3ESTiU/4YTIHsQS4W3fP2ieF4U= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/miekg/dns v1.1.31 h1:sJFOl9BgwbYAWOGEwr61FU28pqsBNdpRBnhGXtO06Oo= github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg= +github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721 h1:RlZweED6sbSArvlE924+mUcZuXKLBHA35U7LN621Bws= github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721/go.mod h1:Ickgr2WtCLZ2MDGd4Gr0geeCH5HybhRJbonOgQpvSxc= From 81eed8d436ace8b896f23c4fdc2a08a568a686aa Mon Sep 17 00:00:00 2001 From: Alan Pope Date: Thu, 16 Sep 2021 
18:22:24 +0100 Subject: [PATCH 03/81] docs: Move nightly builds (#9747) --- README.md | 41 +---------------------------------------- docs/NIGHTLIES.md | 42 ++++++++++++++++++++++++++++++++++++++++++ docs/README.md | 3 +++ 3 files changed, 46 insertions(+), 40 deletions(-) create mode 100644 docs/NIGHTLIES.md diff --git a/README.md b/README.md index c4a89b751c5d2..57b2d4e8cc33d 100644 --- a/README.md +++ b/README.md @@ -80,46 +80,7 @@ version. ### Nightly Builds -These builds are generated from the master branch: - -FreeBSD - .tar.gz -- [telegraf-nightly_freebsd_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_amd64.tar.gz) -- [telegraf-nightly_freebsd_armv7.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_armv7.tar.gz) -- [telegraf-nightly_freebsd_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_i386.tar.gz) - -Linux - .rpm -- [telegraf-nightly.arm64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.arm64.rpm) -- [telegraf-nightly.armel.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armel.rpm) -- [telegraf-nightly.armv6hl.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armv6hl.rpm) -- [telegraf-nightly.i386.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.i386.rpm) -- [telegraf-nightly.ppc64le.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.ppc64le.rpm) -- [telegraf-nightly.s390x.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.s390x.rpm) -- [telegraf-nightly.x86_64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.x86_64.rpm) - -Linux - .deb -- [telegraf_nightly_amd64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_amd64.deb) -- [telegraf_nightly_arm64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_arm64.deb) -- 
[telegraf_nightly_armel.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armel.deb) -- [telegraf_nightly_armhf.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armhf.deb) -- [telegraf_nightly_i386.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_i386.deb) -- [telegraf_nightly_ppc64el.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_ppc64el.deb) -- [telegraf_nightly_s390x.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_s390x.deb) - -Linux - .tar.gz -- [telegraf-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_amd64.tar.gz) -- [telegraf-nightly_linux_arm64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_arm64.tar.gz) -- [telegraf-nightly_linux_armel.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armel.tar.gz) -- [telegraf-nightly_linux_armhf.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armhf.tar.gz) -- [telegraf-nightly_linux_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_i386.tar.gz) -- [telegraf-nightly_linux_s390x.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_s390x.tar.gz) -- [telegraf-static-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-static-nightly_linux_amd64.tar.gz) - -OSX - .tar.gz -- [telegraf-nightly_darwin_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_darwin_amd64.tar.gz) - -Windows - .zip -- [telegraf-nightly_windows_i386.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_i386.zip) -- [telegraf-nightly_windows_amd64.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_amd64.zip) +[Nightly](/docs/NIGHTLIES.md) builds are available, generated from the master branch. 
## How to use it: diff --git a/docs/NIGHTLIES.md b/docs/NIGHTLIES.md new file mode 100644 index 0000000000000..63cdc2d82cfdc --- /dev/null +++ b/docs/NIGHTLIES.md @@ -0,0 +1,42 @@ +### Nightly Builds + +These builds are generated from the master branch: + +FreeBSD - .tar.gz +- [telegraf-nightly_freebsd_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_amd64.tar.gz) +- [telegraf-nightly_freebsd_armv7.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_armv7.tar.gz) +- [telegraf-nightly_freebsd_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_i386.tar.gz) + +Linux - .rpm +- [telegraf-nightly.arm64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.arm64.rpm) +- [telegraf-nightly.armel.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armel.rpm) +- [telegraf-nightly.armv6hl.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armv6hl.rpm) +- [telegraf-nightly.i386.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.i386.rpm) +- [telegraf-nightly.ppc64le.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.ppc64le.rpm) +- [telegraf-nightly.s390x.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.s390x.rpm) +- [telegraf-nightly.x86_64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.x86_64.rpm) + +Linux - .deb +- [telegraf_nightly_amd64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_amd64.deb) +- [telegraf_nightly_arm64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_arm64.deb) +- [telegraf_nightly_armel.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armel.deb) +- [telegraf_nightly_armhf.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armhf.deb) +- [telegraf_nightly_i386.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_i386.deb) +- 
[telegraf_nightly_ppc64el.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_ppc64el.deb) +- [telegraf_nightly_s390x.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_s390x.deb) + +Linux - .tar.gz +- [telegraf-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_amd64.tar.gz) +- [telegraf-nightly_linux_arm64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_arm64.tar.gz) +- [telegraf-nightly_linux_armel.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armel.tar.gz) +- [telegraf-nightly_linux_armhf.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armhf.tar.gz) +- [telegraf-nightly_linux_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_i386.tar.gz) +- [telegraf-nightly_linux_s390x.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_s390x.tar.gz) +- [telegraf-static-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-static-nightly_linux_amd64.tar.gz) + +OSX - .tar.gz +- [telegraf-nightly_darwin_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_darwin_amd64.tar.gz) + +Windows - .zip +- [telegraf-nightly_windows_i386.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_i386.zip) +- [telegraf-nightly_windows_amd64.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_amd64.zip) diff --git a/docs/README.md b/docs/README.md index b7b55336c5a04..99320dee95588 100644 --- a/docs/README.md +++ b/docs/README.md @@ -10,6 +10,8 @@ - [Profiling][profiling] - [Windows Service][winsvc] - [FAQ][faq] +- Developer Builds + - [Nightlies](nightlies) [conf]: /docs/CONFIGURATION.md [metrics]: /docs/METRICS.md @@ -19,3 +21,4 @@ [profiling]: /docs/PROFILING.md [winsvc]: /docs/WINDOWS_SERVICE.md [faq]: /docs/FAQ.md +[nightlies]: /docs/NIGHTLIES.md \ No newline at end of file From 
b806ad88488b057b9864d7365e24b1651726caa3 Mon Sep 17 00:00:00 2001 From: Michael Hall Date: Thu, 16 Sep 2021 14:14:41 -0400 Subject: [PATCH 04/81] docs: Add list of 3rd party builds to the README (#8576) --- README.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/README.md b/README.md index 57b2d4e8cc33d..2b49842789db6 100644 --- a/README.md +++ b/README.md @@ -82,6 +82,17 @@ version. [Nightly](/docs/NIGHTLIES.md) builds are available, generated from the master branch. +### 3rd Party Builds + +Builds for other platforms or package formats are provided by members of the Telegraf community. These packages are not built, tested or supported by the Telegraf project or InfluxData, we make no guarantees that they will work. Please get in touch with the package author if you need support. + +* Windows + * [Chocolatey](https://chocolatey.org/packages/telegraf) by [ripclawffb](https://chocolatey.org/profiles/ripclawffb) + * [Scoop](https://github.com/ScoopInstaller/Main/blob/master/bucket/telegraf.json) +* Linux + * [Snap](https://snapcraft.io/telegraf) by Laurent Sesquès (sajoupa) + + ## How to use it: See usage with: From f5a3df429ad969302c765da5dce92f4c63042f37 Mon Sep 17 00:00:00 2001 From: John Seekins Date: Thu, 16 Sep 2021 15:19:51 -0600 Subject: [PATCH 05/81] fix: add additional logstash output plugin stats (#9707) --- plugins/inputs/logstash/README.md | 6 ++ plugins/inputs/logstash/logstash.go | 65 +++++++++++++++++++- plugins/inputs/logstash/logstash_test.go | 58 +++++++++++++++++ plugins/inputs/logstash/samples_logstash7.go | 7 ++- 4 files changed, 131 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/logstash/README.md b/plugins/inputs/logstash/README.md index 9571de5fd8873..95ec3e6feae66 100644 --- a/plugins/inputs/logstash/README.md +++ b/plugins/inputs/logstash/README.md @@ -42,6 +42,8 @@ Logstash 5 and later is supported. 
### Metrics +Additional plugin stats may be collected (because logstash doesn't consistently expose all stats) + - logstash_jvm - tags: - node_id @@ -125,6 +127,10 @@ Logstash 5 and later is supported. - duration_in_millis - in - out + - bulk_requests_failures (for Logstash 7+) + - bulk_requests_with_errors (for Logstash 7+) + - documents_successes (for logstash 7+) + - documents_retryable_failures (for logstash 7+) - logstash_queue - tags: diff --git a/plugins/inputs/logstash/logstash.go b/plugins/inputs/logstash/logstash.go index 76f75bc63a6a0..10a3e7b6b8dd0 100644 --- a/plugins/inputs/logstash/logstash.go +++ b/plugins/inputs/logstash/logstash.go @@ -126,9 +126,11 @@ type Pipeline struct { } type Plugin struct { - ID string `json:"id"` - Events interface{} `json:"events"` - Name string `json:"name"` + ID string `json:"id"` + Events interface{} `json:"events"` + Name string `json:"name"` + BulkRequests map[string]interface{} `json:"bulk_requests"` + Documents map[string]interface{} `json:"documents"` } type PipelinePlugins struct { @@ -290,6 +292,63 @@ func (logstash *Logstash) gatherPluginsStats( return err } accumulator.AddFields("logstash_plugins", flattener.Fields, pluginTags) + /* + The elasticsearch output produces additional stats around + bulk requests and document writes (that are elasticsearch specific). 
+ Collect those here + */ + if pluginType == "output" && plugin.Name == "elasticsearch" { + /* + The "bulk_requests" section has details about batch writes + into Elasticsearch + + "bulk_requests" : { + "successes" : 2870, + "responses" : { + "200" : 2870 + }, + "failures": 262, + "with_errors": 9089 + }, + */ + flattener := jsonParser.JSONFlattener{} + err := flattener.FlattenJSON("", plugin.BulkRequests) + if err != nil { + return err + } + for k, v := range flattener.Fields { + if strings.HasPrefix(k, "bulk_requests") { + continue + } + newKey := fmt.Sprintf("bulk_requests_%s", k) + flattener.Fields[newKey] = v + delete(flattener.Fields, k) + } + accumulator.AddFields("logstash_plugins", flattener.Fields, pluginTags) + + /* + The "documents" section has counts of individual documents + written/retried/etc. + "documents" : { + "successes" : 2665549, + "retryable_failures": 13733 + } + */ + flattener = jsonParser.JSONFlattener{} + err = flattener.FlattenJSON("", plugin.Documents) + if err != nil { + return err + } + for k, v := range flattener.Fields { + if strings.HasPrefix(k, "documents") { + continue + } + newKey := fmt.Sprintf("documents_%s", k) + flattener.Fields[newKey] = v + delete(flattener.Fields, k) + } + accumulator.AddFields("logstash_plugins", flattener.Fields, pluginTags) + } } return nil diff --git a/plugins/inputs/logstash/logstash_test.go b/plugins/inputs/logstash/logstash_test.go index d8db3475a1e95..089824c58767f 100644 --- a/plugins/inputs/logstash/logstash_test.go +++ b/plugins/inputs/logstash/logstash_test.go @@ -708,6 +708,64 @@ func Test_Logstash7GatherPipelinesQueueStats(test *testing.T) { }, ) + logstash7accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "duration_in_millis": float64(2802177.0), + "in": float64(2665549.0), + "out": float64(2665549.0), + }, + map[string]string{ + "node_id": string("28580380-ad2c-4032-934b-76359125edca"), + "node_name": string("HOST01.local"), + "source": 
string("HOST01.local"), + "node_version": string("7.4.2"), + "pipeline": string("infra"), + "plugin_name": string("elasticsearch"), + "plugin_id": string("38967f09bbd2647a95aa00702b6b557bdbbab31da6a04f991d38abe5629779e3"), + "plugin_type": string("output"), + }, + ) + logstash7accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "bulk_requests_successes": float64(2870), + "bulk_requests_responses_200": float64(2870), + "bulk_requests_failures": float64(262), + "bulk_requests_with_errors": float64(9089), + }, + map[string]string{ + "node_id": string("28580380-ad2c-4032-934b-76359125edca"), + "node_name": string("HOST01.local"), + "source": string("HOST01.local"), + "node_version": string("7.4.2"), + "pipeline": string("infra"), + "plugin_name": string("elasticsearch"), + "plugin_id": string("38967f09bbd2647a95aa00702b6b557bdbbab31da6a04f991d38abe5629779e3"), + "plugin_type": string("output"), + }, + ) + logstash7accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "documents_successes": float64(2665549), + "documents_retryable_failures": float64(13733), + }, + map[string]string{ + "node_id": string("28580380-ad2c-4032-934b-76359125edca"), + "node_name": string("HOST01.local"), + "source": string("HOST01.local"), + "node_version": string("7.4.2"), + "pipeline": string("infra"), + "plugin_name": string("elasticsearch"), + "plugin_id": string("38967f09bbd2647a95aa00702b6b557bdbbab31da6a04f991d38abe5629779e3"), + "plugin_type": string("output"), + }, + ) + logstash7accPipelinesStats.AssertContainsTaggedFields( test, "logstash_queue", diff --git a/plugins/inputs/logstash/samples_logstash7.go b/plugins/inputs/logstash/samples_logstash7.go index fe05712909c81..e04bb4319a27a 100644 --- a/plugins/inputs/logstash/samples_logstash7.go +++ b/plugins/inputs/logstash/samples_logstash7.go @@ -110,10 +110,13 @@ const logstash7PipelinesJSON = ` "successes" : 2870, "responses" : { 
"200" : 2870 - } + }, + "failures": 262, + "with_errors": 9089 }, "documents" : { - "successes" : 2665549 + "successes" : 2665549, + "retryable_failures": 13733 } } ] }, From d441b03b57599257142b7949af8711782209f269 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 16 Sep 2021 15:57:45 -0600 Subject: [PATCH 06/81] fix: bump github.com/shirou/gopsutil (#9760) --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 65a894974bea6..72990ab8394bb 100644 --- a/go.mod +++ b/go.mod @@ -231,7 +231,7 @@ require ( github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e // indirect github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect github.com/sensu/sensu-go/api/core/v2 v2.9.0 - github.com/shirou/gopsutil v3.21.6-0.20210624221800-cb512c850043+incompatible + github.com/shirou/gopsutil v3.21.8+incompatible github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 // indirect github.com/showwin/speedtest-go v1.1.4 github.com/signalfx/com_signalfx_metrics_protobuf v0.0.2 // indirect diff --git a/go.sum b/go.sum index 5ff7799dc902b..79046bf42b4ab 100644 --- a/go.sum +++ b/go.sum @@ -1443,6 +1443,8 @@ github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAm github.com/shirou/gopsutil v2.18.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil v3.21.6-0.20210624221800-cb512c850043+incompatible h1:Rucj22V2P6ktUBqN5auqjyxRHLXqNX6CteXBXifRrgY= github.com/shirou/gopsutil v3.21.6-0.20210624221800-cb512c850043+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/gopsutil v3.21.8+incompatible h1:sh0foI8tMRlCidUJR+KzqWYWxrkuuPIGiO6Vp+KXdCU= +github.com/shirou/gopsutil v3.21.8+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod 
h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 h1:Pm6R878vxWWWR+Sa3ppsLce/Zq+JNTs6aVvRu13jv9A= From 3b20b93a3346a132854f783a148f3c020b375bb9 Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Thu, 16 Sep 2021 16:26:09 -0600 Subject: [PATCH 07/81] go mod tidy --- go.sum | 2 -- 1 file changed, 2 deletions(-) diff --git a/go.sum b/go.sum index 79046bf42b4ab..e85b07043c3f2 100644 --- a/go.sum +++ b/go.sum @@ -1441,8 +1441,6 @@ github.com/sensu/sensu-go/api/core/v2 v2.9.0 h1:NanHMIWbrHP/L4Ge0V1x2+0G9bxFHpvh github.com/sensu/sensu-go/api/core/v2 v2.9.0/go.mod h1:QcgxKxydmScE66hLBTzbFhhiPSR/JHqUjNi/+Lelh6E= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v2.18.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shirou/gopsutil v3.21.6-0.20210624221800-cb512c850043+incompatible h1:Rucj22V2P6ktUBqN5auqjyxRHLXqNX6CteXBXifRrgY= -github.com/shirou/gopsutil v3.21.6-0.20210624221800-cb512c850043+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil v3.21.8+incompatible h1:sh0foI8tMRlCidUJR+KzqWYWxrkuuPIGiO6Vp+KXdCU= github.com/shirou/gopsutil v3.21.8+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= From 8014a508e5fdde3b56936e678a9a72502e219b07 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 17 Sep 2021 16:06:13 -0600 Subject: [PATCH 08/81] fix: bump github.com/aws/smithy-go from 1.3.1 to 1.8.0 (#9770) --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 72990ab8394bb..6f16bb0fb0f83 100644 --- a/go.mod 
+++ b/go.mod @@ -59,7 +59,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/s3 v1.5.0 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.1.5 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.2.2 // indirect - github.com/aws/smithy-go v1.3.1 + github.com/aws/smithy-go v1.8.0 github.com/benbjohnson/clock v1.0.3 github.com/beorn7/perks v1.0.1 // indirect github.com/bitly/go-hostpool v0.1.0 // indirect diff --git a/go.sum b/go.sum index e85b07043c3f2..19b3febf462ff 100644 --- a/go.sum +++ b/go.sum @@ -282,8 +282,9 @@ github.com/aws/aws-sdk-go-v2/service/sso v1.1.5/go.mod h1:bpGz0tidC4y39sZkQSkpO/ github.com/aws/aws-sdk-go-v2/service/sts v1.2.2 h1:fKw6QSGcFlvZCBPYx3fo4sL0HfTmaT06ZtMHJfQQNQQ= github.com/aws/aws-sdk-go-v2/service/sts v1.2.2/go.mod h1:ssRzzJ2RZOVuKj2Vx1YE7ypfil/BIlgmQnCSW4DistU= github.com/aws/smithy-go v1.0.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw= -github.com/aws/smithy-go v1.3.1 h1:xJFO4pK0y9J8fCl34uGsSJX5KNnGbdARDlA5BPhXnwE= github.com/aws/smithy-go v1.3.1/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/aws/smithy-go v1.8.0 h1:AEwwwXQZtUwP5Mz506FeXXrKBe0jA8gVM+1gEcSRooc= +github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= From 38cb624f3729c8bc4d22da3134eb21bde3c7e241 Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Fri, 17 Sep 2021 16:31:42 -0600 Subject: [PATCH 09/81] Update changelog (cherry picked from commit a9924dea7a9bc642120b23db5ef39d757bff9103) --- CHANGELOG.md | 22 +++++++++++++++++++--- etc/telegraf.conf | 9 ++++++++- 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 42ca26772a37b..2ebccd4849220 100644 --- a/CHANGELOG.md 
+++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## v1.20.0-rc0 [2021-09-02] +## v1.20.0 [2021-09-17] #### Release Notes @@ -6,7 +6,7 @@ #### Bugfixes - - [#9700](https://github.com/influxdata/telegraf/pull/9700) Update thrift module to 0.14.2 and zipkin-go-opentracing 0.4.5 + - [#9700](https://github.com/influxdata/telegraf/pull/9700) Update thrift module to 0.14.2 and zipkin-go-opentracing to 0.4.5 - [#9587](https://github.com/influxdata/telegraf/pull/9587) `outputs.opentelemetry` Use headers config in grpc requests - [#9713](https://github.com/influxdata/telegraf/pull/9713) Update runc module to v1.0.0-rc95 to address CVE-2021-30465 - [#9699](https://github.com/influxdata/telegraf/pull/9699) Migrate dgrijalva/jwt-go to golang-jwt/jwt/v4 @@ -16,8 +16,21 @@ - [#9674](https://github.com/influxdata/telegraf/pull/9674) `inputs.mongodb` Change command based on server version - [#9676](https://github.com/influxdata/telegraf/pull/9676) `outputs.dynatrace` Remove hardcoded int value - [#9619](https://github.com/influxdata/telegraf/pull/9619) `outputs.influxdb_v2` Increase accepted retry-after header values. 
- - [#9652](https://github.com/influxdata/telegraf/pull/9652) Update github.com/tinylib/msgp module from 1.1.5 to 1.1.6 + - [#9652](https://github.com/influxdata/telegraf/pull/9652) Update tinylib/msgp module from 1.1.5 to 1.1.6 - [#9471](https://github.com/influxdata/telegraf/pull/9471) `inputs.sql` Make timeout apply to single query + - [#9760](https://github.com/influxdata/telegraf/pull/9760) Update shirou/gopsutil module to 3.21.8 + - [#9707](https://github.com/influxdata/telegraf/pull/9707) `inputs.logstash` Add additional logstash output plugin stats + - [#9656](https://github.com/influxdata/telegraf/pull/9656) Update miekg/dns module from 1.1.31 to 1.1.43 + - [#9750](https://github.com/influxdata/telegraf/pull/9750) Update antchfx/xmlquery module from 1.3.5 to 1.3.6 + - [#9757](https://github.com/influxdata/telegraf/pull/9757) `parsers.registry.go` Fix panic for non-existing metric names + - [#9677](https://github.com/influxdata/telegraf/pull/9677) Update Azure/azure-event-hubs-go/v3 module from 3.2.0 to 3.3.13 + - [#9653](https://github.com/influxdata/telegraf/pull/9653) Update prometheus/client_golang module from 1.7.1 to 1.11.0 + - [#9693](https://github.com/influxdata/telegraf/pull/9693) `inputs.cloudwatch` Fix pagination error + - [#9727](https://github.com/influxdata/telegraf/pull/9727) `outputs.http` Add error message logging + - [#9718](https://github.com/influxdata/telegraf/pull/9718) Update influxdata/influxdb-observability module from 0.2.4 to 0.2.7 + - [#9560](https://github.com/influxdata/telegraf/pull/9560) Update gopcua/opcua module + - [#9544](https://github.com/influxdata/telegraf/pull/9544) `inputs.couchbase` Fix memory leak + - [#9588](https://github.com/influxdata/telegraf/pull/9588) `outputs.opentelemetry` Use attributes setting #### Features @@ -38,12 +51,15 @@ - [#9343](https://github.com/influxdata/telegraf/pull/9343) `inputs.snmp_trap` Improve MIB lookup performance - [#9342](https://github.com/influxdata/telegraf/pull/9342) 
`outputs.newrelic` Add option to override metric_url - [#9306](https://github.com/influxdata/telegraf/pull/9306) `inputs.smart` Add power mode status + - [#9762](https://github.com/influxdata/telegraf/pull/9762) `inputs.bond` Add count of bonded slaves (for easier alerting) + - [#9675](https://github.com/influxdata/telegraf/pull/9675) `outputs.dynatrace` Remove special handling from counters and update dynatrace-oss/dynatrace-metric-utils-go module to 0.3.0 #### New Input Plugins - [#9602](https://github.com/influxdata/telegraf/pull/9602) Add rocm_smi input to monitor AMD GPUs - [#9101](https://github.com/influxdata/telegraf/pull/9101) Add mdstat input to gather from /proc/mdstat collection - [#3536](https://github.com/influxdata/telegraf/pull/3536) Add Elasticsearch query input + - [#9623](https://github.com/influxdata/telegraf/pull/9623) Add internet Speed Monitor Input Plugin #### New Output Plugins diff --git a/etc/telegraf.conf b/etc/telegraf.conf index fabd2616141fb..beb22821464d9 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -592,7 +592,7 @@ # ## Connection timeout, defaults to "5s" if not set. # timeout = "5s" # -# ## If you want to convert values represented as gauges to counters, add the metric names here +# ## If you want metrics to be treated and reported as delta counters, add the metric names here # additional_counters = [ ] # # ## Optional dimensions to be added to every metric @@ -4324,6 +4324,13 @@ # # collect_memstats = true +# # Monitors internet speed using speedtest.net service +# [[inputs.internet_speed]] +# ## Sets if runs file download test +# ## Default: false +# enable_file_download = false + + # # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs. # [[inputs.interrupts]] # ## When set to true, cpu metrics are tagged with the cpu. 
Otherwise cpu is From 9ecf6040afd97488365739c423ffc5ed83a15479 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Mon, 20 Sep 2021 08:13:39 -0700 Subject: [PATCH 10/81] docs: document telegraf commands and flags (#9635) --- docs/COMMANDS_AND_FLAGS.md | 67 ++++++++++++++++++++++++++++++++++++++ docs/CONFIGURATION.md | 3 ++ 2 files changed, 70 insertions(+) create mode 100644 docs/COMMANDS_AND_FLAGS.md diff --git a/docs/COMMANDS_AND_FLAGS.md b/docs/COMMANDS_AND_FLAGS.md new file mode 100644 index 0000000000000..cb0c31268c9a4 --- /dev/null +++ b/docs/COMMANDS_AND_FLAGS.md @@ -0,0 +1,67 @@ +# Telegraf Commands & Flags + +### Usage + +``` +telegraf [commands] +telegraf [flags] +``` + +### Commands + +|command|description| +|--------|-----------------------------------------------| +|`config` |print out full sample configuration to stdout| +|`version`|print the version to stdout| + +### Flags + +|flag|description| +|-------------------|------------| +|`--aggregator-filter ` |filter the aggregators to enable, separator is `:`| +|`--config ` |configuration file to load| +|`--config-directory ` |directory containing additional *.conf files| +|`--watch-config` |Telegraf will restart on local config changes.
Monitor changes using either fs notifications or polling. Valid values: `inotify` or `poll`.
Monitoring is off by default.| +|`--plugin-directory` |directory containing *.so files, this directory will be searched recursively. Any Plugin found will be loaded and namespaced.| +|`--debug` |turn on debug logging| +|`--input-filter ` |filter the inputs to enable, separator is `:`| +|`--input-list` |print available input plugins.| +|`--output-filter ` |filter the outputs to enable, separator is `:`| +|`--output-list` |print available output plugins.| +|`--pidfile ` |file to write our pid to| +|`--pprof-addr
` |pprof address to listen on, don't activate pprof if empty| +|`--processor-filter ` |filter the processors to enable, separator is `:`| +|`--quiet` |run in quiet mode| +|`--section-filter` |filter config sections to output, separator is `:`
Valid values are `agent`, `global_tags`, `outputs`, `processors`, `aggregators` and `inputs`| +|`--sample-config` |print out full sample configuration| +|`--once` |enable once mode: gather metrics once, write them, and exit| +|`--test` |enable test mode: gather metrics once and print them| +|`--test-wait` |wait up to this many seconds for service inputs to complete in test or once mode| +|`--usage ` |print usage for a plugin, ie, `telegraf --usage mysql`| +|`--version` |display the version and exit| + +### Examples + +**Generate a telegraf config file:** + +`telegraf config > telegraf.conf` + +**Generate config with only cpu input & influxdb output plugins defined:** + +`telegraf --input-filter cpu --output-filter influxdb config` + +**Run a single telegraf collection, outputting metrics to stdout:** + +`telegraf --config telegraf.conf --test` + +**Run telegraf with all plugins defined in config file:** + +`telegraf --config telegraf.conf` + +**Run telegraf, enabling the cpu & memory input, and influxdb output plugins:** + +`telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb` + +**Run telegraf with pprof:** + +`telegraf --config telegraf.conf --pprof-addr localhost:6060` diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 70e7981c9450b..9af88b669ea9f 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -19,6 +19,8 @@ To generate a file with specific inputs and outputs, you can use the telegraf --input-filter cpu:mem:net:swap --output-filter influxdb:kafka config ``` +[View the full list][flags] of Telegraf commands and flags or by running `telegraf --help`. + ### Configuration Loading The location of the configuration file can be set via the `--config` command @@ -671,3 +673,4 @@ Reference the detailed [TLS][] documentation. 
[telegraf.conf]: /etc/telegraf.conf [TLS]: /docs/TLS.md [glob pattern]: https://github.com/gobwas/glob#syntax +[flags]: /docs/COMMANDS_AND_FLAGS.md From b93f20068a9b35905137517bb93448bdc5351539 Mon Sep 17 00:00:00 2001 From: Sean Molenaar Date: Mon, 20 Sep 2021 19:10:36 +0300 Subject: [PATCH 11/81] docs: fix jenkins plugin documentation (#9714) --- plugins/inputs/jenkins/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/jenkins/README.md b/plugins/inputs/jenkins/README.md index 4d82f4e90ba31..e12326031b9ef 100644 --- a/plugins/inputs/jenkins/README.md +++ b/plugins/inputs/jenkins/README.md @@ -57,7 +57,7 @@ This plugin does not require a plugin on jenkins and it makes use of Jenkins API ### Metrics: -- jenkins_node +- jenkins - tags: - source - port From 82bdbce498ad2b1f558145fa9f4cc7cac2bbf1c4 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Mon, 20 Sep 2021 10:26:10 -0700 Subject: [PATCH 12/81] chore: "makefile help" output, but still support building based on arch for CI (#9579) --- .circleci/config.yml | 16 +- Makefile | 355 ++++++++++++++++------------------- docs/developers/PACKAGING.md | 25 ++- 3 files changed, 179 insertions(+), 217 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index b2043e1fa291c..1f644a7b9d20b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -123,11 +123,11 @@ commands: - when: condition: << parameters.release >> steps: - - run: 'mips=1 mipsel=1 arm64=1 amd64=1 static=1 armel=1 armhf=1 s390x=1 ppc641e=1 i386=1 windows=1 darwin=1 make package' + - run: 'make package' - when: condition: << parameters.nightly >> steps: - - run: 'mips=1 mipsel=1 arm64=1 amd64=1 static=1 armel=1 armhf=1 s390x=1 ppc641e=1 i386=1 windows=1 darwin=1 NIGHTLY=1 make package' + - run: 'make package' - run: 'make upload-nightly' - unless: condition: @@ -135,7 +135,7 @@ commands: - << parameters.nightly >> - << parameters.release >> steps: - 
- run: '<< parameters.type >>=1 make package' + - run: 'make package include_packages="$(make << parameters.type >>)"' - store_artifacts: path: './build/dist' destination: 'build/dist' @@ -215,11 +215,11 @@ jobs: steps: - package-build: type: i386 - ppc641e-package: - executor: go-1_17 + ppc64le-package: + executor: go-1_16 steps: - package-build: - type: ppc641e + type: ppc64le s390x-package: executor: go-1_17 steps: @@ -391,7 +391,7 @@ workflows: - 'i386-package': requires: - 'test-awaiter' - - 'ppc641e-package': + - 'ppc64le-package': requires: - 'test-awaiter' - 's390x-package': @@ -421,7 +421,7 @@ workflows: - 'share-artifacts': requires: - 'i386-package' - - 'ppc641e-package' + - 'ppc64le-package' - 's390x-package' - 'armel-package' - 'amd64-package' diff --git a/Makefile b/Makefile index 230eedf600f6f..a7797a0e8ce5f 100644 --- a/Makefile +++ b/Makefile @@ -80,9 +80,18 @@ help: @echo ' lint-install - install linter' @echo ' check-deps - check docs/LICENSE_OF_DEPENDENCIES.md' @echo ' clean - delete build artifacts' + @echo ' package - build all supported packages, override include_packages to only build a subset' + @echo ' e.g.: make package include_packages="amd64.deb"' @echo '' - @echo 'Package Targets:' - @$(foreach dist,$(dists),echo " $(dist)";) + @echo 'Possible values for include_packages variable' + @$(foreach package,$(include_packages),echo " $(package)";) + @echo '' + @echo 'Resulting package name format (where arch will be the arch of the package):' + @echo ' telegraf_$(deb_version)_arch.deb' + @echo ' telegraf-$(rpm_version).arch.rpm' + @echo ' telegraf-$(tar_version)_arch.tar.gz' + @echo ' telegraf-$(tar_version)_arch.zip' + .PHONY: deps deps: @@ -224,164 +233,118 @@ $(buildbin): @mkdir -pv $(dir $@) go build -o $(dir $@) -ldflags "$(LDFLAGS)" ./cmd/telegraf -ifdef mips -debs += telegraf_$(deb_version)_mips.deb -tars += telegraf-$(tar_version)_linux_mips.tar.gz -endif - -ifdef mipsel -debs += telegraf_$(deb_version)_mipsel.deb -tars += 
telegraf-$(tar_version)_linux_mipsel.tar.gz -endif - -ifdef arm64 -tars += telegraf-$(tar_version)_linux_arm64.tar.gz -debs += telegraf_$(deb_version)_arm64.deb -rpms += telegraf-$(rpm_version).aarch64.rpm -endif - -ifdef amd64 -tars += telegraf-$(tar_version)_freebsd_amd64.tar.gz -tars += telegraf-$(tar_version)_linux_amd64.tar.gz -debs += telegraf_$(deb_version)_amd64.deb -rpms += telegraf-$(rpm_version).x86_64.rpm -endif - -ifdef static -tars += telegraf-$(tar_version)_static_linux_amd64.tar.gz -endif - -ifdef armel -tars += telegraf-$(tar_version)_linux_armel.tar.gz -rpms += telegraf-$(rpm_version).armel.rpm -debs += telegraf_$(deb_version)_armel.deb -endif - -ifdef armhf -tars += telegraf-$(tar_version)_linux_armhf.tar.gz -tars += telegraf-$(tar_version)_freebsd_armv7.tar.gz -debs += telegraf_$(deb_version)_armhf.deb -rpms += telegraf-$(rpm_version).armv6hl.rpm -endif - -ifdef s390x -tars += telegraf-$(tar_version)_linux_s390x.tar.gz -debs += telegraf_$(deb_version)_s390x.deb -rpms += telegraf-$(rpm_version).s390x.rpm -endif - -ifdef ppc641e -tars += telegraf-$(tar_version)_linux_ppc64le.tar.gz -rpms += telegraf-$(rpm_version).ppc64le.rpm -debs += telegraf_$(deb_version)_ppc64el.deb -endif - -ifdef i386 -tars += telegraf-$(tar_version)_freebsd_i386.tar.gz -debs += telegraf_$(deb_version)_i386.deb -tars += telegraf-$(tar_version)_linux_i386.tar.gz -rpms += telegraf-$(rpm_version).i386.rpm -endif - -ifdef windows -zips += telegraf-$(tar_version)_windows_i386.zip -zips += telegraf-$(tar_version)_windows_amd64.zip -endif - -ifdef darwin -tars += telegraf-$(tar_version)_darwin_amd64.tar.gz -endif - -dists := $(debs) $(rpms) $(tars) $(zips) +# Define packages Telegraf supports, organized by architecture with a rule to echo the list to limit include_packages +# e.g. 
make package include_packages="$(make amd64)" +mips += linux_mips.tar.gz mips.deb +.PHONY: mips +mips: + @ echo $(mips) +mipsel += mipsel.deb linux_mipsel.tar.gz +.PHONY: mipsel +mipsel: + @ echo $(mipsel) +arm64 += linux_arm64.tar.gz arm64.deb aarch64.rpm +.PHONY: arm64 +arm64: + @ echo $(arm64) +amd64 += freebsd_amd64.tar.gz linux_amd64.tar.gz amd64.deb x86_64.rpm +.PHONY: amd64 +amd64: + @ echo $(amd64) +static += static_linux_amd64.tar.gz +.PHONY: static +static: + @ echo $(static) +armel += linux_armel.tar.gz armel.rpm armel.deb +.PHONY: armel +armel: + @ echo $(armel) +armhf += linux_armhf.tar.gz freebsd_armv7.tar.gz armhf.deb armv6hl.rpm +.PHONY: armhf +armhf: + @ echo $(armhf) +s390x += linux_s390x.tar.gz s390x.deb s390x.rpm +.PHONY: s390x +s390x: + @ echo $(s390x) +ppc64le += linux_ppc64le.tar.gz ppc64le.rpm ppc64el.deb +.PHONY: ppc64le +ppc64le: + @ echo $(ppc64le) +i386 += freebsd_i386.tar.gz i386.deb linux_i386.tar.gzi386.rpm +.PHONY: i386 +i386: + @ echo $(i386) +windows += windows_i386.zip windows_amd64.zip +.PHONY: windows +windows: + @ echo $(windows) +darwin += darwin_amd64.tar.gz +.PHONY: darwin +darwin: + @ echo $(darwin) + +include_packages := $(mips) $(mipsel) $(arm64) $(amd64) $(static) $(armel) $(armhf) $(s390x) $(ppc64le) $(i386) $(windows) $(darwin) .PHONY: package -package: $(dists) - -rpm_amd64 := amd64 -rpm_386 := i386 -rpm_s390x := s390x -rpm_ppc64le := ppc64le -rpm_arm5 := armel -rpm_arm6 := armv6hl -rpm_arm647 := aarch64 -rpm_arch = $(rpm_$(GOARCH)$(GOARM)) - -.PHONY: $(rpms) -$(rpms): - @$(MAKE) install - @mkdir -p $(pkgdir) - fpm --force \ - --log info \ - --architecture $(rpm_arch) \ - --input-type dir \ - --output-type rpm \ - --vendor InfluxData \ - --url https://github.com/influxdata/telegraf \ - --license MIT \ - --maintainer support@influxdb.com \ - --config-files /etc/telegraf/telegraf.conf \ - --config-files /etc/logrotate.d/telegraf \ - --after-install scripts/rpm/post-install.sh \ - --before-install 
scripts/rpm/pre-install.sh \ - --after-remove scripts/rpm/post-remove.sh \ - --description "Plugin-driven server agent for reporting metrics into InfluxDB." \ - --depends coreutils \ - --depends shadow-utils \ - --rpm-posttrans scripts/rpm/post-install.sh \ - --name telegraf \ - --version $(version) \ - --iteration $(rpm_iteration) \ - --chdir $(DESTDIR) \ - --package $(pkgdir)/$@ - -deb_amd64 := amd64 -deb_386 := i386 -deb_s390x := s390x -deb_ppc64le := ppc64el -deb_arm5 := armel -deb_arm6 := armhf -deb_arm647 := arm64 -deb_mips := mips -deb_mipsle := mipsel -deb_arch = $(deb_$(GOARCH)$(GOARM)) - -.PHONY: $(debs) -$(debs): - @$(MAKE) install - @mkdir -pv $(pkgdir) - fpm --force \ - --log info \ - --architecture $(deb_arch) \ - --input-type dir \ - --output-type deb \ - --vendor InfluxData \ - --url https://github.com/influxdata/telegraf \ - --license MIT \ - --maintainer support@influxdb.com \ - --config-files /etc/telegraf/telegraf.conf.sample \ - --config-files /etc/logrotate.d/telegraf \ - --after-install scripts/deb/post-install.sh \ - --before-install scripts/deb/pre-install.sh \ - --after-remove scripts/deb/post-remove.sh \ - --before-remove scripts/deb/pre-remove.sh \ - --description "Plugin-driven server agent for reporting metrics into InfluxDB." \ - --name telegraf \ - --version $(version) \ - --iteration $(deb_iteration) \ - --chdir $(DESTDIR) \ - --package $(pkgdir)/$@ - -.PHONY: $(zips) -$(zips): - @$(MAKE) install - @mkdir -p $(pkgdir) - (cd $(dir $(DESTDIR)) && zip -r - ./*) > $(pkgdir)/$@ +package: $(include_packages) -.PHONY: $(tars) -$(tars): +.PHONY: $(include_packages) +$(include_packages): @$(MAKE) install @mkdir -p $(pkgdir) - tar --owner 0 --group 0 -czvf $(pkgdir)/$@ -C $(dir $(DESTDIR)) . 
+ + @if [ "$(suffix $@)" = ".rpm" ]; then \ + fpm --force \ + --log info \ + --architecture $(basename $@) \ + --input-type dir \ + --output-type rpm \ + --vendor InfluxData \ + --url https://github.com/influxdata/telegraf \ + --license MIT \ + --maintainer support@influxdb.com \ + --config-files /etc/telegraf/telegraf.conf \ + --config-files /etc/logrotate.d/telegraf \ + --after-install scripts/rpm/post-install.sh \ + --before-install scripts/rpm/pre-install.sh \ + --after-remove scripts/rpm/post-remove.sh \ + --description "Plugin-driven server agent for reporting metrics into InfluxDB." \ + --depends coreutils \ + --depends shadow-utils \ + --rpm-posttrans scripts/rpm/post-install.sh \ + --name telegraf \ + --version $(version) \ + --iteration $(rpm_iteration) \ + --chdir $(DESTDIR) \ + --package $(pkgdir)/telegraf-$(rpm_version).$@ ;\ + elif [ "$(suffix $@)" = ".deb" ]; then \ + fpm --force \ + --log info \ + --architecture $(basename $@) \ + --input-type dir \ + --output-type deb \ + --vendor InfluxData \ + --url https://github.com/influxdata/telegraf \ + --license MIT \ + --maintainer support@influxdb.com \ + --config-files /etc/telegraf/telegraf.conf.sample \ + --config-files /etc/logrotate.d/telegraf \ + --after-install scripts/deb/post-install.sh \ + --before-install scripts/deb/pre-install.sh \ + --after-remove scripts/deb/post-remove.sh \ + --before-remove scripts/deb/pre-remove.sh \ + --description "Plugin-driven server agent for reporting metrics into InfluxDB." \ + --name telegraf \ + --version $(version) \ + --iteration $(deb_iteration) \ + --chdir $(DESTDIR) \ + --package $(pkgdir)/telegraf_$(deb_version)_$@ ;\ + elif [ "$(suffix $@)" = ".zip" ]; then \ + (cd $(dir $(DESTDIR)) && zip -r - ./*) > $(pkgdir)/telegraf-$(tar_version)_$@ ;\ + elif [ "$(suffix $@)" = ".gz" ]; then \ + tar --owner 0 --group 0 -czvf $(pkgdir)/telegraf-$(tar_version)_$@ -C $(dir $(DESTDIR)) . 
;\ + fi .PHONY: upload-nightly upload-nightly: @@ -393,63 +356,63 @@ upload-nightly: --include "*.zip" \ --acl public-read -%amd64.deb %x86_64.rpm %linux_amd64.tar.gz: export GOOS := linux -%amd64.deb %x86_64.rpm %linux_amd64.tar.gz: export GOARCH := amd64 +amd64.deb x86_64.rpm linux_amd64.tar.gz: export GOOS := linux +amd64.deb x86_64.rpm linux_amd64.tar.gz: export GOARCH := amd64 -%static_linux_amd64.tar.gz: export cgo := -nocgo -%static_linux_amd64.tar.gz: export CGO_ENABLED := 0 +static_linux_amd64.tar.gz: export cgo := -nocgo +static_linux_amd64.tar.gz: export CGO_ENABLED := 0 -%i386.deb %i386.rpm %linux_i386.tar.gz: export GOOS := linux -%i386.deb %i386.rpm %linux_i386.tar.gz: export GOARCH := 386 +i386.deb i386.rpm linux_i386.tar.gz: export GOOS := linux +i386.deb i386.rpm linux_i386.tar.gz: export GOARCH := 386 -%armel.deb %armel.rpm %linux_armel.tar.gz: export GOOS := linux -%armel.deb %armel.rpm %linux_armel.tar.gz: export GOARCH := arm -%armel.deb %armel.rpm %linux_armel.tar.gz: export GOARM := 5 +armel.deb armel.rpm linux_armel.tar.gz: export GOOS := linux +armel.deb armel.rpm linux_armel.tar.gz: export GOARCH := arm +armel.deb armel.rpm linux_armel.tar.gz: export GOARM := 5 -%armhf.deb %armv6hl.rpm %linux_armhf.tar.gz: export GOOS := linux -%armhf.deb %armv6hl.rpm %linux_armhf.tar.gz: export GOARCH := arm -%armhf.deb %armv6hl.rpm %linux_armhf.tar.gz: export GOARM := 6 +armhf.deb armv6hl.rpm linux_armhf.tar.gz: export GOOS := linux +armhf.deb armv6hl.rpm linux_armhf.tar.gz: export GOARCH := arm +armhf.deb armv6hl.rpm linux_armhf.tar.gz: export GOARM := 6 -%arm64.deb %aarch64.rpm %linux_arm64.tar.gz: export GOOS := linux -%arm64.deb %aarch64.rpm %linux_arm64.tar.gz: export GOARCH := arm64 -%arm64.deb %aarch64.rpm %linux_arm64.tar.gz: export GOARM := 7 +arm64.deb aarch64.rpm linux_arm64.tar.gz: export GOOS := linux +arm64.deb aarch64.rpm linux_arm64.tar.gz: export GOARCH := arm64 +arm64.deb aarch64.rpm linux_arm64.tar.gz: export GOARM := 7 -%mips.deb 
%linux_mips.tar.gz: export GOOS := linux -%mips.deb %linux_mips.tar.gz: export GOARCH := mips +mips.deb linux_mips.tar.gz: export GOOS := linux +mips.deb linux_mips.tar.gz: export GOARCH := mips -%mipsel.deb %linux_mipsel.tar.gz: export GOOS := linux -%mipsel.deb %linux_mipsel.tar.gz: export GOARCH := mipsle +mipsel.deb linux_mipsel.tar.gz: export GOOS := linux +mipsel.deb linux_mipsel.tar.gz: export GOARCH := mipsle -%s390x.deb %s390x.rpm %linux_s390x.tar.gz: export GOOS := linux -%s390x.deb %s390x.rpm %linux_s390x.tar.gz: export GOARCH := s390x +s390x.deb s390x.rpm linux_s390x.tar.gz: export GOOS := linux +s390x.deb s390x.rpm linux_s390x.tar.gz: export GOARCH := s390x -%ppc64el.deb %ppc64le.rpm %linux_ppc64le.tar.gz: export GOOS := linux -%ppc64el.deb %ppc64le.rpm %linux_ppc64le.tar.gz: export GOARCH := ppc64le +ppc64el.deb ppc64le.rpm linux_ppc64le.tar.gz: export GOOS := linux +ppc64el.deb ppc64le.rpm linux_ppc64le.tar.gz: export GOARCH := ppc64le -%freebsd_amd64.tar.gz: export GOOS := freebsd -%freebsd_amd64.tar.gz: export GOARCH := amd64 +freebsd_amd64.tar.gz: export GOOS := freebsd +freebsd_amd64.tar.gz: export GOARCH := amd64 -%freebsd_i386.tar.gz: export GOOS := freebsd -%freebsd_i386.tar.gz: export GOARCH := 386 +freebsd_i386.tar.gz: export GOOS := freebsd +freebsd_i386.tar.gz: export GOARCH := 386 -%freebsd_armv7.tar.gz: export GOOS := freebsd -%freebsd_armv7.tar.gz: export GOARCH := arm -%freebsd_armv7.tar.gz: export GOARM := 7 +freebsd_armv7.tar.gz: export GOOS := freebsd +freebsd_armv7.tar.gz: export GOARCH := arm +freebsd_armv7.tar.gz: export GOARM := 7 -%windows_amd64.zip: export GOOS := windows -%windows_amd64.zip: export GOARCH := amd64 +windows_amd64.zip: export GOOS := windows +windows_amd64.zip: export GOARCH := amd64 -%darwin_amd64.tar.gz: export GOOS := darwin -%darwin_amd64.tar.gz: export GOARCH := amd64 +darwin_amd64.tar.gz: export GOOS := darwin +darwin_amd64.tar.gz: export GOARCH := amd64 -%windows_i386.zip: export GOOS := windows 
-%windows_i386.zip: export GOARCH := 386 +windows_i386.zip: export GOOS := windows +windows_i386.zip: export GOARCH := 386 -%windows_i386.zip %windows_amd64.zip: export prefix = -%windows_i386.zip %windows_amd64.zip: export bindir = $(prefix) -%windows_i386.zip %windows_amd64.zip: export sysconfdir = $(prefix) -%windows_i386.zip %windows_amd64.zip: export localstatedir = $(prefix) -%windows_i386.zip %windows_amd64.zip: export EXEEXT := .exe +windows_i386.zip windows_amd64.zip: export prefix = +windows_i386.zip windows_amd64.zip: export bindir = $(prefix) +windows_i386.zip windows_amd64.zip: export sysconfdir = $(prefix) +windows_i386.zip windows_amd64.zip: export localstatedir = $(prefix) +windows_i386.zip windows_amd64.zip: export EXEEXT := .exe %.deb: export pkg := deb %.deb: export prefix := /usr diff --git a/docs/developers/PACKAGING.md b/docs/developers/PACKAGING.md index f9708fb7164d0..cbdb61b05af01 100644 --- a/docs/developers/PACKAGING.md +++ b/docs/developers/PACKAGING.md @@ -1,5 +1,9 @@ # Packaging +Building the packages for Telegraf is automated using [Make](https://en.wikipedia.org/wiki/Make_(software)). Just running `make` will build a Telegraf binary for the operating system and architecture you are using (if it is supported). If you need to build a different package then you can run `make package` which will build all the supported packages. You will most likely only want a subset, you can define a subset of packages to be built by overriding the `include_packages` variable like so `make package include_packages="amd64.deb"`. You can also build all packages for a specific architecture like so `make package include_packages="$(make amd64)"`. + +The packaging steps require certain tools to be setup before hand to work. These dependencies are listed in the ci-1.16.docker file which you can find in the scripts directory. Therefore it is recommended to use Docker to build the artifacts, see more details below. 
+ ## Package using Docker This packaging method uses the CI images, and is very similar to how the @@ -18,20 +22,15 @@ docker run -ti quay.io/influxdb/telegraf-ci:1.9.7 /bin/bash ``` From within the container: -``` -go get -d github.com/influxdata/telegraf -cd /go/src/github.com/influxdata/telegraf - -# Use tag of Telegraf version you would like to build -git checkout release-1.10 -git reset --hard 1.10.2 -make deps -# To build packages run: - -``` -make package amd64=1 -``` +1. `go get -d github.com/influxdata/telegraf` +2. `cd /go/src/github.com/influxdata/telegraf` +3. `git checkout release-1.10` + * Replace tag `release-1.10` with the version of Telegraf you would like to build +4. `git reset --hard 1.10.2` +5. `make deps` +6. `make package include_packages="amd64.deb"` + * Change `include_packages` to change what package you want, run `make help` to see possible values From the host system, copy the build artifacts out of the container: ``` From 58d4e9a851c293608ede43b47dda8f8b347979dc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Sep 2021 15:34:39 -0600 Subject: [PATCH 13/81] fix: bump cloud.google.com/go/pubsub from 1.15.0 to 1.17.0 (#9769) --- go.mod | 9 ++++----- go.sum | 18 ++++++++++++------ 2 files changed, 16 insertions(+), 11 deletions(-) diff --git a/go.mod b/go.mod index 6f16bb0fb0f83..d1d2dd094d749 100644 --- a/go.mod +++ b/go.mod @@ -3,9 +3,10 @@ module github.com/influxdata/telegraf go 1.17 require ( - cloud.google.com/go v0.90.0 + cloud.google.com/go v0.93.3 // indirect cloud.google.com/go/bigquery v1.8.0 - cloud.google.com/go/pubsub v1.15.0 + cloud.google.com/go/monitoring v0.2.0 + cloud.google.com/go/pubsub v1.17.0 code.cloudfoundry.org/clock v1.0.0 // indirect collectd.org v0.5.0 github.com/Azure/azure-amqp-common-go/v3 v3.0.1 // indirect @@ -161,7 +162,6 @@ require ( github.com/jmespath/go-jmespath v0.4.0 github.com/jpillora/backoff v1.0.0 // indirect 
github.com/json-iterator/go v1.1.11 // indirect - github.com/jstemmer/go-junit-report v0.9.1 // indirect github.com/kardianos/service v1.0.0 github.com/karrick/godirwalk v1.16.1 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 @@ -275,7 +275,6 @@ require ( go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e // indirect - golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect golang.org/x/mod v0.4.2 // indirect golang.org/x/net v0.0.0-20210610132358-84b48f89b13b golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a @@ -290,7 +289,7 @@ require ( golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 google.golang.org/api v0.54.0 google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20210813162853-db860fec028c + google.golang.org/genproto v0.0.0-20210827211047-25e5f791fe06 google.golang.org/grpc v1.40.0 google.golang.org/protobuf v1.27.1 gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect diff --git a/go.sum b/go.sum index 19b3febf462ff..9a8b98cea97ad 100644 --- a/go.sum +++ b/go.sum @@ -24,8 +24,10 @@ cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0 h1:MjvSkUq8RuAb+2JLDi5VQmmExRJPUQ3JLCWpRB6fmdw= cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.92.2/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.93.3 h1:wPBktZFzYBcCZVARvwVKqH1uEj+aLXofJEtrb4oOsio= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= cloud.google.com/go/bigquery v1.0.1/go.mod 
h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -36,12 +38,16 @@ cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM7 cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/kms v0.1.0 h1:VXAb5OzejDcyhFzIDeZ5n5AUdlsFnCyexuascIwWMj0= +cloud.google.com/go/kms v0.1.0/go.mod h1:8Qp8PCAypHg4FdmlyW1QRAv09BGQ9Uzh7JnmIZxPk+c= +cloud.google.com/go/monitoring v0.2.0 h1:UFQB1+YbZjAOqAFFY4RlNiOrt19O5HzPeCdtYSlPvmk= +cloud.google.com/go/monitoring v0.2.0/go.mod h1:K/JoZWY3xszHf38AMkzZGx1n5eT1/57ilElGMpESsEE= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.15.0 h1:6KI/wDVYLtNvzIPJ8ObuJcq5bBtAWQ6Suo8osHPvYn4= -cloud.google.com/go/pubsub v1.15.0/go.mod h1:DnEUPGZlp+N9MElp/6uVqCKiknQixvVLcrgrqT62O6A= +cloud.google.com/go/pubsub v1.17.0 h1:uGzqGUGvaSJ3APz5BmLFw1LpSTnB9o+EzE5fI3rBbJI= +cloud.google.com/go/pubsub v1.17.0/go.mod h1:bBIeYx9ftf/hr7eoSUim6cRaOYZE/hHuigwdwLLByi8= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= @@ -1032,7 +1038,6 @@ github.com/json-iterator/go v1.1.10/go.mod 
h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/ github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= @@ -1716,7 +1721,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= @@ -2174,8 +2178,10 @@ google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod 
h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c h1:iLQakcwWG3k/++1q/46apVb1sUQ3IqIdn9yUE6eh/xA= google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210824181836-a4879c3d0e89/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210827211047-25e5f791fe06 h1:Ogdiaj9EMVKYHnDsESxwlTr/k5eqCdwoQVJEcdg0NbE= +google.golang.org/genproto v0.0.0-20210827211047-25e5f791fe06/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= From 7eb6e88c859ea30a783f9bf1146a3d3c574113f4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Sep 2021 15:53:39 -0600 Subject: [PATCH 14/81] fix: bump github.com/Azure/go-autorest/autorest/azure/auth from 0.5.6 to 0.5.8 (#9678) --- go.mod | 2 +- go.sum | 8 +++----- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index d1d2dd094d749..d56ac810d6026 100644 --- a/go.mod +++ b/go.mod @@ -21,7 +21,7 @@ require ( github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest v0.11.18 github.com/Azure/go-autorest/autorest/adal v0.9.15 - github.com/Azure/go-autorest/autorest/azure/auth v0.5.6 + github.com/Azure/go-autorest/autorest/azure/auth v0.5.8 github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect diff --git a/go.sum b/go.sum index 9a8b98cea97ad..c67071b110a16 100644 --- a/go.sum +++ b/go.sum @@ -107,13 +107,13 @@ 
github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMl github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/adal v0.9.10/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/adal v0.9.11/go.mod h1:nBKAnTomx8gDtl+3ZCJv2v0KACFHWTB2drffI1B68Pk= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/adal v0.9.15 h1:X+p2GF0GWyOiSmqohIaEeuNFNDY4I4EOlVuUQvFdWMk= github.com/Azure/go-autorest/autorest/adal v0.9.15/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.6 h1:cgiBtUxatlt/e3qY6fQJioqbocWHr5osz259MomF5M0= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.6/go.mod h1:nYlP+G+n8MhD5CjIi6W8nFTIJn/PnTHes5nUbK6BxD0= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.8 h1:TzPg6B6fTZ0G1zBf3T54aI7p3cAT6u//TOXGPmFMOXg= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.8/go.mod h1:kxyKZTSfKh8OVFWPAgOgQ/frrJgeYQJPyR5fLFmXko4= github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 h1:dMOmEJfkLKW/7JsokJqkyoYSgmR08hi9KrhjZb+JALY= github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM= @@ -1687,7 +1687,6 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= @@ -1957,7 +1956,6 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069 h1:siQdpVirKtzPhKl3lZWozZraCFObP8S1v6PRp0bLrtU= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= From c3bdb7e8dd8a4bbc4080c64601f17353f755210f Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Mon, 20 Sep 2021 17:24:40 -0600 Subject: [PATCH 15/81] Update build version to 1.21.0 --- build_version.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build_version.txt b/build_version.txt index 3989355915568..3500250a4b05b 100644 --- a/build_version.txt +++ b/build_version.txt @@ -1 +1 @@ -1.20.0 +1.21.0 From 
8133fd83a8177866adff2028160bcf27e186464c Mon Sep 17 00:00:00 2001 From: Alan Pope Date: Tue, 21 Sep 2021 12:02:13 +0100 Subject: [PATCH 16/81] Reduce README size/complexity --- README.md | 367 +++--------------------------------------------------- 1 file changed, 19 insertions(+), 348 deletions(-) diff --git a/README.md b/README.md index 2b49842789db6..5f0861f4fa3cb 100644 --- a/README.md +++ b/README.md @@ -8,9 +8,8 @@ Telegraf is an agent for collecting, processing, aggregating, and writing metrics. -Design goals are to have a minimal memory footprint with a plugin system so -that developers in the community can easily add support for collecting -metrics. +Design goal: +- Have a minimal memory footprint with a plugin system so that developers in the community can easily add support for collecting metrics. Telegraf is plugin-driven and has the concept of 4 distinct plugin types: @@ -19,25 +18,9 @@ Telegraf is plugin-driven and has the concept of 4 distinct plugin types: 3. [Aggregator Plugins](#aggregator-plugins) create aggregate metrics (e.g. mean, min, max, quantiles, etc.) 4. [Output Plugins](#output-plugins) write metrics to various destinations -New plugins are designed to be easy to contribute, pull requests are welcomed -and we work to incorporate as many pull requests as possible. -If none of the internal plugins fit your needs, you could have a look at the +New plugins are designed to be easy to contribute, pull requests are welcomed and we work to incorporate as many pull requests as possible. If none of the internal plugins fit your needs, you could have a look at the [list of external plugins](EXTERNAL_PLUGINS.md). -## Try in Browser :rocket: - -You can try Telegraf right in your browser in the [Telegraf playground](https://rootnroll.com/d/telegraf/). 
- -## Contributing - -There are many ways to contribute: -- Fix and [report bugs](https://github.com/influxdata/telegraf/issues/new) -- [Improve documentation](https://github.com/influxdata/telegraf/issues?q=is%3Aopen+label%3Adocumentation) -- [Review code and feature proposals](https://github.com/influxdata/telegraf/pulls) -- Answer questions and discuss here on github and on the [Community Site](https://community.influxdata.com/) -- [Contribute plugins](CONTRIBUTING.md) -- [Contribute external plugins](docs/EXTERNAL_PLUGINS.md) - ## Minimum Requirements Telegraf shares the same [minimum requirements][] as Go: @@ -92,7 +75,6 @@ Builds for other platforms or package formats are provided by members of the Tel * Linux * [Snap](https://snapcraft.io/telegraf) by Laurent Sesquès (sajoupa) - ## How to use it: See usage with: @@ -138,330 +120,19 @@ telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb For documentation on the latest development code see the [documentation index][devel docs]. 
[release docs]: https://docs.influxdata.com/telegraf -[devel docs]: docs - -## Input Plugins - -* [activemq](./plugins/inputs/activemq) -* [aerospike](./plugins/inputs/aerospike) -* [amqp_consumer](./plugins/inputs/amqp_consumer) (rabbitmq) -* [apache](./plugins/inputs/apache) -* [apcupsd](./plugins/inputs/apcupsd) -* [aurora](./plugins/inputs/aurora) -* [aws cloudwatch](./plugins/inputs/cloudwatch) (Amazon Cloudwatch) -* [azure_storage_queue](./plugins/inputs/azure_storage_queue) -* [bcache](./plugins/inputs/bcache) -* [beanstalkd](./plugins/inputs/beanstalkd) -* [bind](./plugins/inputs/bind) -* [bond](./plugins/inputs/bond) -* [burrow](./plugins/inputs/burrow) -* [cassandra](./plugins/inputs/cassandra) (deprecated, use [jolokia2](./plugins/inputs/jolokia2)) -* [ceph](./plugins/inputs/ceph) -* [cgroup](./plugins/inputs/cgroup) -* [chrony](./plugins/inputs/chrony) -* [cisco_telemetry_gnmi](./plugins/inputs/cisco_telemetry_gnmi) (deprecated, renamed to [gnmi](/plugins/inputs/gnmi)) -* [cisco_telemetry_mdt](./plugins/inputs/cisco_telemetry_mdt) -* [clickhouse](./plugins/inputs/clickhouse) -* [cloud_pubsub](./plugins/inputs/cloud_pubsub) Google Cloud Pub/Sub -* [cloud_pubsub_push](./plugins/inputs/cloud_pubsub_push) Google Cloud Pub/Sub push endpoint -* [conntrack](./plugins/inputs/conntrack) -* [consul](./plugins/inputs/consul) -* [couchbase](./plugins/inputs/couchbase) -* [couchdb](./plugins/inputs/couchdb) -* [cpu](./plugins/inputs/cpu) -* [DC/OS](./plugins/inputs/dcos) -* [diskio](./plugins/inputs/diskio) -* [disk](./plugins/inputs/disk) -* [disque](./plugins/inputs/disque) -* [dmcache](./plugins/inputs/dmcache) -* [dns query time](./plugins/inputs/dns_query) -* [docker](./plugins/inputs/docker) -* [docker_log](./plugins/inputs/docker_log) -* [dovecot](./plugins/inputs/dovecot) -* [dpdk](./plugins/inputs/dpdk) -* [aws ecs](./plugins/inputs/ecs) (Amazon Elastic Container Service, Fargate) -* [elasticsearch](./plugins/inputs/elasticsearch) -* 
[ethtool](./plugins/inputs/ethtool) -* [eventhub_consumer](./plugins/inputs/eventhub_consumer) (Azure Event Hubs \& Azure IoT Hub) -* [exec](./plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios) -* [execd](./plugins/inputs/execd) (generic executable "daemon" processes) -* [fail2ban](./plugins/inputs/fail2ban) -* [fibaro](./plugins/inputs/fibaro) -* [file](./plugins/inputs/file) -* [filestat](./plugins/inputs/filestat) -* [filecount](./plugins/inputs/filecount) -* [fireboard](/plugins/inputs/fireboard) -* [fluentd](./plugins/inputs/fluentd) -* [github](./plugins/inputs/github) -* [gnmi](./plugins/inputs/gnmi) -* [graylog](./plugins/inputs/graylog) -* [haproxy](./plugins/inputs/haproxy) -* [hddtemp](./plugins/inputs/hddtemp) -* [httpjson](./plugins/inputs/httpjson) (generic JSON-emitting http service plugin) -* [http_listener](./plugins/inputs/influxdb_listener) (deprecated, renamed to [influxdb_listener](/plugins/inputs/influxdb_listener)) -* [http_listener_v2](./plugins/inputs/http_listener_v2) -* [http](./plugins/inputs/http) (generic HTTP plugin, supports using input data formats) -* [http_response](./plugins/inputs/http_response) -* [icinga2](./plugins/inputs/icinga2) -* [infiniband](./plugins/inputs/infiniband) -* [influxdb](./plugins/inputs/influxdb) -* [influxdb_listener](./plugins/inputs/influxdb_listener) -* [influxdb_v2_listener](./plugins/inputs/influxdb_v2_listener) -* [intel_powerstat](plugins/inputs/intel_powerstat) -* [intel_rdt](./plugins/inputs/intel_rdt) -* [internal](./plugins/inputs/internal) -* [interrupts](./plugins/inputs/interrupts) -* [ipmi_sensor](./plugins/inputs/ipmi_sensor) -* [ipset](./plugins/inputs/ipset) -* [iptables](./plugins/inputs/iptables) -* [ipvs](./plugins/inputs/ipvs) -* [jenkins](./plugins/inputs/jenkins) -* [jolokia2](./plugins/inputs/jolokia2) (java, cassandra, kafka) -* [jolokia](./plugins/inputs/jolokia) (deprecated, use [jolokia2](./plugins/inputs/jolokia2)) -* 
[jti_openconfig_telemetry](./plugins/inputs/jti_openconfig_telemetry) -* [kafka_consumer](./plugins/inputs/kafka_consumer) -* [kapacitor](./plugins/inputs/kapacitor) -* [aws kinesis](./plugins/inputs/kinesis_consumer) (Amazon Kinesis) -* [kernel](./plugins/inputs/kernel) -* [kernel_vmstat](./plugins/inputs/kernel_vmstat) -* [kibana](./plugins/inputs/kibana) -* [knx_listener](./plugins/inputs/knx_listener) -* [kubernetes](./plugins/inputs/kubernetes) -* [kube_inventory](./plugins/inputs/kube_inventory) -* [lanz](./plugins/inputs/lanz) -* [leofs](./plugins/inputs/leofs) -* [linux_sysctl_fs](./plugins/inputs/linux_sysctl_fs) -* [logparser](./plugins/inputs/logparser) (deprecated, use [tail](/plugins/inputs/tail)) -* [logstash](./plugins/inputs/logstash) -* [lustre2](./plugins/inputs/lustre2) -* [mailchimp](./plugins/inputs/mailchimp) -* [marklogic](./plugins/inputs/marklogic) -* [mcrouter](./plugins/inputs/mcrouter) -* [mdstat](./plugins/inputs/mdstat) -* [memcached](./plugins/inputs/memcached) -* [mem](./plugins/inputs/mem) -* [mesos](./plugins/inputs/mesos) -* [minecraft](./plugins/inputs/minecraft) -* [modbus](./plugins/inputs/modbus) -* [mongodb](./plugins/inputs/mongodb) -* [monit](./plugins/inputs/monit) -* [mqtt_consumer](./plugins/inputs/mqtt_consumer) -* [multifile](./plugins/inputs/multifile) -* [mysql](./plugins/inputs/mysql) -* [nats_consumer](./plugins/inputs/nats_consumer) -* [nats](./plugins/inputs/nats) -* [neptune_apex](./plugins/inputs/neptune_apex) -* [net](./plugins/inputs/net) -* [net_response](./plugins/inputs/net_response) -* [netstat](./plugins/inputs/net) -* [nfsclient](./plugins/inputs/nfsclient) -* [nginx](./plugins/inputs/nginx) -* [nginx_plus_api](./plugins/inputs/nginx_plus_api) -* [nginx_plus](./plugins/inputs/nginx_plus) -* [nginx_sts](./plugins/inputs/nginx_sts) -* [nginx_upstream_check](./plugins/inputs/nginx_upstream_check) -* [nginx_vts](./plugins/inputs/nginx_vts) -* [nsd](./plugins/inputs/nsd) -* 
[nsq_consumer](./plugins/inputs/nsq_consumer) -* [nsq](./plugins/inputs/nsq) -* [nstat](./plugins/inputs/nstat) -* [ntpq](./plugins/inputs/ntpq) -* [nvidia_smi](./plugins/inputs/nvidia_smi) -* [opcua](./plugins/inputs/opcua) -* [openldap](./plugins/inputs/openldap) -* [openntpd](./plugins/inputs/openntpd) -* [opensmtpd](./plugins/inputs/opensmtpd) -* [opentelemetry](./plugins/inputs/opentelemetry) -* [openweathermap](./plugins/inputs/openweathermap) -* [pf](./plugins/inputs/pf) -* [pgbouncer](./plugins/inputs/pgbouncer) -* [phpfpm](./plugins/inputs/phpfpm) -* [phusion passenger](./plugins/inputs/passenger) -* [ping](./plugins/inputs/ping) -* [postfix](./plugins/inputs/postfix) -* [postgresql_extensible](./plugins/inputs/postgresql_extensible) -* [postgresql](./plugins/inputs/postgresql) -* [powerdns](./plugins/inputs/powerdns) -* [powerdns_recursor](./plugins/inputs/powerdns_recursor) -* [processes](./plugins/inputs/processes) -* [procstat](./plugins/inputs/procstat) -* [prometheus](./plugins/inputs/prometheus) (can be used for [Caddy server](./plugins/inputs/prometheus/README.md#usage-for-caddy-http-server)) -* [proxmox](./plugins/inputs/proxmox) -* [puppetagent](./plugins/inputs/puppetagent) -* [rabbitmq](./plugins/inputs/rabbitmq) -* [raindrops](./plugins/inputs/raindrops) -* [ras](./plugins/inputs/ras) -* [ravendb](./plugins/inputs/ravendb) -* [redfish](./plugins/inputs/redfish) -* [redis](./plugins/inputs/redis) -* [rethinkdb](./plugins/inputs/rethinkdb) -* [riak](./plugins/inputs/riak) -* [salesforce](./plugins/inputs/salesforce) -* [sensors](./plugins/inputs/sensors) -* [sflow](./plugins/inputs/sflow) -* [smart](./plugins/inputs/smart) -* [snmp_legacy](./plugins/inputs/snmp_legacy) -* [snmp](./plugins/inputs/snmp) -* [snmp_trap](./plugins/inputs/snmp_trap) -* [socket_listener](./plugins/inputs/socket_listener) -* [solr](./plugins/inputs/solr) -* [sql](./plugins/inputs/sql) (generic SQL query plugin) -* [sql server](./plugins/inputs/sqlserver) (microsoft) -* 
[stackdriver](./plugins/inputs/stackdriver) (Google Cloud Monitoring) -* [sql](./plugins/outputs/sql) (SQL generic output) -* [statsd](./plugins/inputs/statsd) -* [suricata](./plugins/inputs/suricata) -* [swap](./plugins/inputs/swap) -* [synproxy](./plugins/inputs/synproxy) -* [syslog](./plugins/inputs/syslog) -* [sysstat](./plugins/inputs/sysstat) -* [systemd_units](./plugins/inputs/systemd_units) -* [system](./plugins/inputs/system) -* [tail](./plugins/inputs/tail) -* [temp](./plugins/inputs/temp) -* [tcp_listener](./plugins/inputs/socket_listener) -* [teamspeak](./plugins/inputs/teamspeak) -* [tengine](./plugins/inputs/tengine) -* [tomcat](./plugins/inputs/tomcat) -* [twemproxy](./plugins/inputs/twemproxy) -* [udp_listener](./plugins/inputs/socket_listener) -* [unbound](./plugins/inputs/unbound) -* [uwsgi](./plugins/inputs/uwsgi) -* [varnish](./plugins/inputs/varnish) -* [vsphere](./plugins/inputs/vsphere) VMware vSphere -* [webhooks](./plugins/inputs/webhooks) - * [filestack](./plugins/inputs/webhooks/filestack) - * [github](./plugins/inputs/webhooks/github) - * [mandrill](./plugins/inputs/webhooks/mandrill) - * [papertrail](./plugins/inputs/webhooks/papertrail) - * [particle](./plugins/inputs/webhooks/particle) - * [rollbar](./plugins/inputs/webhooks/rollbar) -* [win_eventlog](./plugins/inputs/win_eventlog) -* [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters) -* [win_services](./plugins/inputs/win_services) -* [wireguard](./plugins/inputs/wireguard) -* [wireless](./plugins/inputs/wireless) -* [x509_cert](./plugins/inputs/x509_cert) -* [zfs](./plugins/inputs/zfs) -* [zipkin](./plugins/inputs/zipkin) -* [zookeeper](./plugins/inputs/zookeeper) - -## Parsers - -- [InfluxDB Line Protocol](/plugins/parsers/influx) -- [Collectd](/plugins/parsers/collectd) -- [CSV](/plugins/parsers/csv) -- [Dropwizard](/plugins/parsers/dropwizard) -- [FormUrlencoded](/plugins/parser/form_urlencoded) -- [Graphite](/plugins/parsers/graphite) -- 
[Grok](/plugins/parsers/grok) -- [JSON](/plugins/parsers/json) -- [JSON v2](/plugins/parsers/json_v2) -- [Logfmt](/plugins/parsers/logfmt) -- [Nagios](/plugins/parsers/nagios) -- [Prometheus](/plugins/parsers/prometheus) -- [Prometheus Remote Write](/plugins/parsers/prometheusremotewrite) -- [Value](/plugins/parsers/value), ie: 45 or "booyah" -- [Wavefront](/plugins/parsers/wavefront) -- [XPath](/plugins/parsers/xpath) (supports XML, JSON, MessagePack, Protocol Buffers) - -## Serializers - -- [InfluxDB Line Protocol](/plugins/serializers/influx) -- [Carbon2](/plugins/serializers/carbon2) -- [Graphite](/plugins/serializers/graphite) -- [JSON](/plugins/serializers/json) -- [MessagePack](/plugins/serializers/msgpack) -- [Prometheus](/plugins/serializers/prometheus) -- [Prometheus Remote Write](/plugins/serializers/prometheusremotewrite) -- [ServiceNow](/plugins/serializers/nowmetric) -- [SplunkMetric](/plugins/serializers/splunkmetric) -- [Wavefront](/plugins/serializers/wavefront) - -## Processor Plugins - -* [clone](/plugins/processors/clone) -* [converter](/plugins/processors/converter) -* [date](/plugins/processors/date) -* [dedup](/plugins/processors/dedup) -* [defaults](/plugins/processors/defaults) -* [enum](/plugins/processors/enum) -* [execd](/plugins/processors/execd) -* [ifname](/plugins/processors/ifname) -* [filepath](/plugins/processors/filepath) -* [override](/plugins/processors/override) -* [parser](/plugins/processors/parser) -* [pivot](/plugins/processors/pivot) -* [port_name](/plugins/processors/port_name) -* [printer](/plugins/processors/printer) -* [regex](/plugins/processors/regex) -* [rename](/plugins/processors/rename) -* [reverse_dns](/plugins/processors/reverse_dns) -* [s2geo](/plugins/processors/s2geo) -* [starlark](/plugins/processors/starlark) -* [strings](/plugins/processors/strings) -* [tag_limit](/plugins/processors/tag_limit) -* [template](/plugins/processors/template) -* [topk](/plugins/processors/topk) -* 
[unpivot](/plugins/processors/unpivot) - -## Aggregator Plugins - -* [basicstats](./plugins/aggregators/basicstats) -* [derivative](./plugins/aggregators/derivative) -* [final](./plugins/aggregators/final) -* [histogram](./plugins/aggregators/histogram) -* [merge](./plugins/aggregators/merge) -* [minmax](./plugins/aggregators/minmax) -* [quantile](./plugins/aggregators/quantile) -* [valuecounter](./plugins/aggregators/valuecounter) - -## Output Plugins - -* [influxdb](./plugins/outputs/influxdb) (InfluxDB 1.x) -* [influxdb_v2](./plugins/outputs/influxdb_v2) ([InfluxDB 2.x](https://github.com/influxdata/influxdb)) -* [amon](./plugins/outputs/amon) -* [amqp](./plugins/outputs/amqp) (rabbitmq) -* [application_insights](./plugins/outputs/application_insights) -* [aws kinesis](./plugins/outputs/kinesis) -* [aws cloudwatch](./plugins/outputs/cloudwatch) -* [azure_monitor](./plugins/outputs/azure_monitor) -* [bigquery](./plugins/outputs/bigquery) -* [cloud_pubsub](./plugins/outputs/cloud_pubsub) Google Cloud Pub/Sub -* [cratedb](./plugins/outputs/cratedb) -* [datadog](./plugins/outputs/datadog) -* [discard](./plugins/outputs/discard) -* [dynatrace](./plugins/outputs/dynatrace) -* [elasticsearch](./plugins/outputs/elasticsearch) -* [exec](./plugins/outputs/exec) -* [execd](./plugins/outputs/execd) -* [file](./plugins/outputs/file) -* [graphite](./plugins/outputs/graphite) -* [graylog](./plugins/outputs/graylog) -* [health](./plugins/outputs/health) -* [http](./plugins/outputs/http) -* [instrumental](./plugins/outputs/instrumental) -* [kafka](./plugins/outputs/kafka) -* [librato](./plugins/outputs/librato) -* [logz.io](./plugins/outputs/logzio) -* [mqtt](./plugins/outputs/mqtt) -* [nats](./plugins/outputs/nats) -* [newrelic](./plugins/outputs/newrelic) -* [nsq](./plugins/outputs/nsq) -* [opentelemetry](./plugins/outputs/opentelemetry) -* [opentsdb](./plugins/outputs/opentsdb) -* [prometheus](./plugins/outputs/prometheus_client) -* [riemann](./plugins/outputs/riemann) -* 
[riemann_legacy](./plugins/outputs/riemann_legacy) -* [sensu](./plugins/outputs/sensu) -* [signalfx](./plugins/outputs/signalfx) -* [socket_writer](./plugins/outputs/socket_writer) -* [stackdriver](./plugins/outputs/stackdriver) (Google Cloud Monitoring) -* [syslog](./plugins/outputs/syslog) -* [tcp](./plugins/outputs/socket_writer) -* [udp](./plugins/outputs/socket_writer) -* [warp10](./plugins/outputs/warp10) -* [wavefront](./plugins/outputs/wavefront) -* [websocket](./plugins/outputs/websocket) -* [sumologic](./plugins/outputs/sumologic) -* [yandex_cloud_monitoring](./plugins/outputs/yandex_cloud_monitoring) +[developer docs]: docs +- [Input Plugins](/telegraf/docs/INPUTS.md) +- [Output Plugins](/telegraf/docs/OUTPUTS.md) +- [Processor Plugins](/telegraf/docs/PROCESSORS.md) +- [Aggregator Plugins](/telegraf/docs/AGGREGATORS.md) + + +## Contributing + +There are many ways to contribute: +- Fix and [report bugs](https://github.com/influxdata/telegraf/issues/new) +- [Improve documentation](https://github.com/influxdata/telegraf/issues?q=is%3Aopen+label%3Adocumentation) +- [Review code and feature proposals](https://github.com/influxdata/telegraf/pulls) +- Answer questions and discuss here on github and on the [Community Site](https://community.influxdata.com/) +- [Contribute plugins](CONTRIBUTING.md) +- [Contribute external plugins](docs/EXTERNAL_PLUGINS.md) \ No newline at end of file From b9aa9839fbde04fb4a728d656fae57a20281e0ba Mon Sep 17 00:00:00 2001 From: Heiko Schlittermann Date: Tue, 21 Sep 2021 17:12:44 +0200 Subject: [PATCH 17/81] feat: Add json_timestamp_layout option (#8229) --- config/config.go | 3 +- .../azure_data_explorer.go | 2 +- .../azure_data_explorer_test.go | 2 +- plugins/serializers/json/README.md | 7 +++++ plugins/serializers/json/json.go | 24 +++++++++------ plugins/serializers/json/json_test.go | 30 +++++++++++-------- plugins/serializers/registry.go | 9 ++++-- 7 files changed, 50 insertions(+), 27 deletions(-) diff --git 
a/config/config.go b/config/config.go index 56beed8ee4910..b6eed9446162f 100644 --- a/config/config.go +++ b/config/config.go @@ -1504,6 +1504,7 @@ func (c *Config) buildSerializer(tbl *ast.Table) (serializers.Serializer, error) c.getFieldString(tbl, "graphite_separator", &sc.GraphiteSeparator) c.getFieldDuration(tbl, "json_timestamp_units", &sc.TimestampUnits) + c.getFieldString(tbl, "json_timestamp_format", &sc.TimestampFormat) c.getFieldBool(tbl, "splunkmetric_hec_routing", &sc.HecRouting) c.getFieldBool(tbl, "splunkmetric_multimetric", &sc.SplunkmetricMultiMetric) @@ -1569,7 +1570,7 @@ func (c *Config) missingTomlField(_ reflect.Type, key string) error { "grok_custom_pattern_files", "grok_custom_patterns", "grok_named_patterns", "grok_patterns", "grok_timezone", "grok_unique_timestamp", "influx_max_line_bytes", "influx_sort_fields", "influx_uint_support", "interval", "json_name_key", "json_query", "json_strict", - "json_string_fields", "json_time_format", "json_time_key", "json_timestamp_units", "json_timezone", "json_v2", + "json_string_fields", "json_time_format", "json_time_key", "json_timestamp_format", "json_timestamp_units", "json_timezone", "json_v2", "metric_batch_size", "metric_buffer_limit", "name_override", "name_prefix", "name_suffix", "namedrop", "namepass", "order", "pass", "period", "precision", "prefix", "prometheus_export_timestamp", "prometheus_sort_metrics", "prometheus_string_as_label", diff --git a/plugins/outputs/azure_data_explorer/azure_data_explorer.go b/plugins/outputs/azure_data_explorer/azure_data_explorer.go index 6d411fd05c3b9..b4c2054d3c22e 100644 --- a/plugins/outputs/azure_data_explorer/azure_data_explorer.go +++ b/plugins/outputs/azure_data_explorer/azure_data_explorer.go @@ -230,7 +230,7 @@ func (adx *AzureDataExplorer) Init() error { return errors.New("Metrics grouping type is not valid") } - serializer, err := json.NewSerializer(time.Second) + serializer, err := json.NewSerializer(time.Second, "") // FIXME: get the 
json.TimestampFormat from the config file if err != nil { return err } diff --git a/plugins/outputs/azure_data_explorer/azure_data_explorer_test.go b/plugins/outputs/azure_data_explorer/azure_data_explorer_test.go index f85d074cb1f6f..b8d30d66ce28b 100644 --- a/plugins/outputs/azure_data_explorer/azure_data_explorer_test.go +++ b/plugins/outputs/azure_data_explorer/azure_data_explorer_test.go @@ -105,7 +105,7 @@ func TestWrite(t *testing.T) { for _, tC := range testCases { t.Run(tC.name, func(t *testing.T) { - serializer, err := telegrafJson.NewSerializer(time.Second) + serializer, err := telegrafJson.NewSerializer(time.Second, "") require.NoError(t, err) plugin := AzureDataExplorer{ diff --git a/plugins/serializers/json/README.md b/plugins/serializers/json/README.md index 08bb9d4f7c904..b33875578272a 100644 --- a/plugins/serializers/json/README.md +++ b/plugins/serializers/json/README.md @@ -19,6 +19,13 @@ The `json` output data format converts metrics into JSON documents. ## such as "1ns", "1us", "1ms", "10ms", "1s". Durations are truncated to ## the power of 10 less than the specified units. json_timestamp_units = "1s" + + ## The default timestamp format is Unix epoch time, subject to the + # resolution configured in json_timestamp_units. 
+ # Other timestamp layout can be configured using the Go language time + # layout specification from https://golang.org/pkg/time/#Time.Format + # e.g.: json_timestamp_format = "2006-01-02T15:04:05Z07:00" + #json_timestamp_format = "" ``` ### Examples: diff --git a/plugins/serializers/json/json.go b/plugins/serializers/json/json.go index e2d7af3305117..6db2a43ee231a 100644 --- a/plugins/serializers/json/json.go +++ b/plugins/serializers/json/json.go @@ -8,18 +8,20 @@ import ( "github.com/influxdata/telegraf" ) -type serializer struct { - TimestampUnits time.Duration +type Serializer struct { + TimestampUnits time.Duration + TimestampFormat string } -func NewSerializer(timestampUnits time.Duration) (*serializer, error) { - s := &serializer{ - TimestampUnits: truncateDuration(timestampUnits), +func NewSerializer(timestampUnits time.Duration, timestampformat string) (*Serializer, error) { + s := &Serializer{ + TimestampUnits: truncateDuration(timestampUnits), + TimestampFormat: timestampformat, } return s, nil } -func (s *serializer) Serialize(metric telegraf.Metric) ([]byte, error) { +func (s *Serializer) Serialize(metric telegraf.Metric) ([]byte, error) { m := s.createObject(metric) serialized, err := json.Marshal(m) if err != nil { @@ -30,7 +32,7 @@ func (s *serializer) Serialize(metric telegraf.Metric) ([]byte, error) { return serialized, nil } -func (s *serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { +func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { objects := make([]interface{}, 0, len(metrics)) for _, metric := range metrics { m := s.createObject(metric) @@ -48,7 +50,7 @@ func (s *serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { return serialized, nil } -func (s *serializer) createObject(metric telegraf.Metric) map[string]interface{} { +func (s *Serializer) createObject(metric telegraf.Metric) map[string]interface{} { m := make(map[string]interface{}, 4) tags := 
make(map[string]string, len(metric.TagList())) @@ -71,7 +73,11 @@ func (s *serializer) createObject(metric telegraf.Metric) map[string]interface{} m["fields"] = fields m["name"] = metric.Name() - m["timestamp"] = metric.Time().UnixNano() / int64(s.TimestampUnits) + if s.TimestampFormat == "" { + m["timestamp"] = metric.Time().UnixNano() / int64(s.TimestampUnits) + } else { + m["timestamp"] = metric.Time().UTC().Format(s.TimestampFormat) + } return m } diff --git a/plugins/serializers/json/json_test.go b/plugins/serializers/json/json_test.go index 74d7f94166621..be939243904eb 100644 --- a/plugins/serializers/json/json_test.go +++ b/plugins/serializers/json/json_test.go @@ -30,7 +30,7 @@ func TestSerializeMetricFloat(t *testing.T) { } m := metric.New("cpu", tags, fields, now) - s, _ := NewSerializer(0) + s, _ := NewSerializer(0, "") var buf []byte buf, err := s.Serialize(m) assert.NoError(t, err) @@ -40,9 +40,10 @@ func TestSerializeMetricFloat(t *testing.T) { func TestSerialize_TimestampUnits(t *testing.T) { tests := []struct { - name string - timestampUnits time.Duration - expected string + name string + timestampUnits time.Duration + timestampFormat string + expected string }{ { name: "default of 1s", @@ -74,6 +75,11 @@ func TestSerialize_TimestampUnits(t *testing.T) { timestampUnits: 65 * time.Millisecond, expected: `{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":152547879512}`, }, + { + name: "timestamp format", + timestampFormat: "2006-01-02T15:04:05Z07:00", + expected: `{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":"2018-05-05T00:06:35Z"}`, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -85,7 +91,7 @@ func TestSerialize_TimestampUnits(t *testing.T) { }, time.Unix(1525478795, 123456789), ) - s, _ := NewSerializer(tt.timestampUnits) + s, _ := NewSerializer(tt.timestampUnits, tt.timestampFormat) actual, err := s.Serialize(m) require.NoError(t, err) require.Equal(t, tt.expected+"\n", string(actual)) @@ -103,7 +109,7 
@@ func TestSerializeMetricInt(t *testing.T) { } m := metric.New("cpu", tags, fields, now) - s, _ := NewSerializer(0) + s, _ := NewSerializer(0, "") var buf []byte buf, err := s.Serialize(m) assert.NoError(t, err) @@ -122,7 +128,7 @@ func TestSerializeMetricString(t *testing.T) { } m := metric.New("cpu", tags, fields, now) - s, _ := NewSerializer(0) + s, _ := NewSerializer(0, "") var buf []byte buf, err := s.Serialize(m) assert.NoError(t, err) @@ -142,7 +148,7 @@ func TestSerializeMultiFields(t *testing.T) { } m := metric.New("cpu", tags, fields, now) - s, _ := NewSerializer(0) + s, _ := NewSerializer(0, "") var buf []byte buf, err := s.Serialize(m) assert.NoError(t, err) @@ -161,7 +167,7 @@ func TestSerializeMetricWithEscapes(t *testing.T) { } m := metric.New("My CPU", tags, fields, now) - s, _ := NewSerializer(0) + s, _ := NewSerializer(0, "") buf, err := s.Serialize(m) assert.NoError(t, err) @@ -180,7 +186,7 @@ func TestSerializeBatch(t *testing.T) { ) metrics := []telegraf.Metric{m, m} - s, _ := NewSerializer(0) + s, _ := NewSerializer(0, "") buf, err := s.SerializeBatch(metrics) require.NoError(t, err) require.Equal(t, []byte(`{"metrics":[{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":0},{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":0}]}`), buf) @@ -199,7 +205,7 @@ func TestSerializeBatchSkipInf(t *testing.T) { ), } - s, err := NewSerializer(0) + s, err := NewSerializer(0, "") require.NoError(t, err) buf, err := s.SerializeBatch(metrics) require.NoError(t, err) @@ -218,7 +224,7 @@ func TestSerializeBatchSkipInfAllFields(t *testing.T) { ), } - s, err := NewSerializer(0) + s, err := NewSerializer(0, "") require.NoError(t, err) buf, err := s.SerializeBatch(metrics) require.NoError(t, err) diff --git a/plugins/serializers/registry.go b/plugins/serializers/registry.go index e67a9594dda73..b17364e66f0a6 100644 --- a/plugins/serializers/registry.go +++ b/plugins/serializers/registry.go @@ -88,6 +88,9 @@ type Config struct { // Timestamp units 
to use for JSON formatted output TimestampUnits time.Duration `toml:"timestamp_units"` + // Timestamp format to use for JSON formatted output + TimestampFormat string `toml:"timestamp_format"` + // Include HEC routing fields for splunkmetric output HecRouting bool `toml:"hec_routing"` @@ -123,7 +126,7 @@ func NewSerializer(config *Config) (Serializer, error) { case "graphite": serializer, err = NewGraphiteSerializer(config.Prefix, config.Template, config.GraphiteTagSupport, config.GraphiteTagSanitizeMode, config.GraphiteSeparator, config.Templates) case "json": - serializer, err = NewJSONSerializer(config.TimestampUnits) + serializer, err = NewJSONSerializer(config.TimestampUnits, config.TimestampFormat) case "splunkmetric": serializer, err = NewSplunkmetricSerializer(config.HecRouting, config.SplunkmetricMultiMetric) case "nowmetric": @@ -188,8 +191,8 @@ func NewWavefrontSerializer(prefix string, useStrict bool, sourceOverride []stri return wavefront.NewSerializer(prefix, useStrict, sourceOverride) } -func NewJSONSerializer(timestampUnits time.Duration) (Serializer, error) { - return json.NewSerializer(timestampUnits) +func NewJSONSerializer(timestampUnits time.Duration, timestampFormat string) (Serializer, error) { + return json.NewSerializer(timestampUnits, timestampFormat) } func NewCarbon2Serializer(carbon2format string, carbon2SanitizeReplaceChar string) (Serializer, error) { From 90d08787f5961cd465c0f6b04194cdb475cef35c Mon Sep 17 00:00:00 2001 From: John Seekins Date: Tue, 21 Sep 2021 10:07:58 -0600 Subject: [PATCH 18/81] feat: add measurements from puppet 5 (#9706) --- plugins/inputs/puppetagent/README.md | 37 +++++++---- .../inputs/puppetagent/last_run_summary.yaml | 39 ++++++----- plugins/inputs/puppetagent/puppetagent.go | 52 +++++++++------ .../inputs/puppetagent/puppetagent_test.go | 64 +++++++++++-------- 4 files changed, 115 insertions(+), 77 deletions(-) diff --git a/plugins/inputs/puppetagent/README.md b/plugins/inputs/puppetagent/README.md index 
687005b98cc11..1406064d5c617 100644 --- a/plugins/inputs/puppetagent/README.md +++ b/plugins/inputs/puppetagent/README.md @@ -85,18 +85,19 @@ Meta: - tags: `` Measurement names: + - puppetagent_changes_total - puppetagent_events_failure - puppetagent_events_total - puppetagent_events_success + - puppetagent_resources_changed + - puppetagent_resources_corrective_change - puppetagent_resources_failed + - puppetagent_resources_failedtorestart + - puppetagent_resources_outofsync + - puppetagent_resources_restarted - puppetagent_resources_scheduled - - puppetagent_resources_changed - puppetagent_resources_skipped - puppetagent_resources_total - - puppetagent_resources_failedtorestart - - puppetagent_resources_restarted - - puppetagent_resources_outofsync - - puppetagent_changes_total - puppetagent_time_service - puppetagent_time_lastrun - puppetagent_version_config @@ -108,18 +109,26 @@ Meta: - tags: `` Measurement names: - - puppetagent_time_user - - puppetagent_time_schedule - - puppetagent_time_filebucket - - puppetagent_time_file - - puppetagent_time_exec - puppetagent_time_anchor - - puppetagent_time_sshauthorizedkey - - puppetagent_time_package - - puppetagent_time_total + - puppetagent_time_catalogapplication - puppetagent_time_configretrieval - - puppetagent_time_lastrun + - puppetagent_time_convertcatalog - puppetagent_time_cron + - puppetagent_time_exec + - puppetagent_time_factgeneration + - puppetagent_time_file + - puppetagent_time_filebucket + - puppetagent_time_group + - puppetagent_time_lastrun + - puppetagent_time_noderetrieval + - puppetagent_time_notify + - puppetagent_time_package + - puppetagent_time_pluginsync + - puppetagent_time_schedule + - puppetagent_time_sshauthorizedkey + - puppetagent_time_total + - puppetagent_time_transactionevaluation + - puppetagent_time_user - puppetagent_version_config #### PuppetAgent string measurements: diff --git a/plugins/inputs/puppetagent/last_run_summary.yaml b/plugins/inputs/puppetagent/last_run_summary.yaml 
index be2f017465fad..c1aa1ce276216 100644 --- a/plugins/inputs/puppetagent/last_run_summary.yaml +++ b/plugins/inputs/puppetagent/last_run_summary.yaml @@ -1,34 +1,43 @@ --- events: failure: 0 + noop: 0 total: 0 success: 0 resources: + changed: 0 + corrective_change: 0 failed: 0 + failed_to_restart: 0 + out_of_sync: 0 + restarted: 0 scheduled: 0 - changed: 0 skipped: 0 total: 109 - failed_to_restart: 0 - restarted: 0 - out_of_sync: 0 changes: total: 0 time: - user: 0.004331 - schedule: 0.001123 - filebucket: 0.000353 - file: 0.441472 - exec: 0.508123 anchor: 0.000555 - yumrepo: 0.006989 - ssh_authorized_key: 0.000764 - service: 1.807795 - package: 1.325788 - total: 8.85354707064819 + catalog_application: 0.010555 config_retrieval: 4.75567007064819 - last_run: 1444936531 + convert_catalog: 1.3 cron: 0.000584 + exec: 0.508123 + fact_generation: 0.34 + file: 0.441472 + filebucket: 0.000353 + last_run: 1444936531 + node_retrieval: 1.235 + notify: 0.00035 + package: 1.325788 + plugin_sync: 0.325788 + schedule: 0.001123 + service: 1.807795 + ssh_authorized_key: 0.000764 + total: 8.85354707064819 + transaction_evaluation: 4.69765 + user: 0.004331 + yumrepo: 0.006989 version: config: "environment:d6018ce" puppet: "3.7.5" diff --git a/plugins/inputs/puppetagent/puppetagent.go b/plugins/inputs/puppetagent/puppetagent.go index 741de4a0dc013..36c284ff57cb6 100644 --- a/plugins/inputs/puppetagent/puppetagent.go +++ b/plugins/inputs/puppetagent/puppetagent.go @@ -32,19 +32,21 @@ type State struct { type event struct { Failure int64 `yaml:"failure"` + Noop int64 `yaml:"noop"` Total int64 `yaml:"total"` Success int64 `yaml:"success"` } type resource struct { - Failed int64 `yaml:"failed"` - Scheduled int64 `yaml:"scheduled"` - Changed int64 `yaml:"changed"` - Skipped int64 `yaml:"skipped"` - Total int64 `yaml:"total"` - FailedToRestart int64 `yaml:"failed_to_restart"` - Restarted int64 `yaml:"restarted"` - OutOfSync int64 `yaml:"out_of_sync"` + Changed int64 `yaml:"changed"` + 
CorrectiveChange int64 `yaml:"corrective_change"` + Failed int64 `yaml:"failed"` + FailedToRestart int64 `yaml:"failed_to_restart"` + OutOfSync int64 `yaml:"out_of_sync"` + Restarted int64 `yaml:"restarted"` + Scheduled int64 `yaml:"scheduled"` + Skipped int64 `yaml:"skipped"` + Total int64 `yaml:"total"` } type change struct { @@ -52,19 +54,27 @@ type change struct { } type time struct { - User float64 `yaml:"user"` - Schedule float64 `yaml:"schedule"` - FileBucket float64 `yaml:"filebucket"` - File float64 `yaml:"file"` - Exec float64 `yaml:"exec"` - Anchor float64 `yaml:"anchor"` - SSHAuthorizedKey float64 `yaml:"ssh_authorized_key"` - Service float64 `yaml:"service"` - Package float64 `yaml:"package"` - Total float64 `yaml:"total"` - ConfigRetrieval float64 `yaml:"config_retrieval"` - LastRun int64 `yaml:"last_run"` - Cron float64 `yaml:"cron"` + Anchor float64 `yaml:"anchor"` + CataLogApplication float64 `yaml:"catalog_application"` + ConfigRetrieval float64 `yaml:"config_retrieval"` + ConvertCatalog float64 `yaml:"convert_catalog"` + Cron float64 `yaml:"cron"` + Exec float64 `yaml:"exec"` + FactGeneration float64 `yaml:"fact_generation"` + File float64 `yaml:"file"` + FileBucket float64 `yaml:"filebucket"` + Group float64 `yaml:"group"` + LastRun int64 `yaml:"last_run"` + NodeRetrieval float64 `yaml:"node_retrieval"` + Notify float64 `yaml:"notify"` + Package float64 `yaml:"package"` + PluginSync float64 `yaml:"plugin_sync"` + Schedule float64 `yaml:"schedule"` + Service float64 `yaml:"service"` + SSHAuthorizedKey float64 `yaml:"ssh_authorized_key"` + Total float64 `yaml:"total"` + TransactionEvaluation float64 `yaml:"transaction_evaluation"` + User float64 `yaml:"user"` } type version struct { diff --git a/plugins/inputs/puppetagent/puppetagent_test.go b/plugins/inputs/puppetagent/puppetagent_test.go index 6ba769ac5dd37..754fb39783a2a 100644 --- a/plugins/inputs/puppetagent/puppetagent_test.go +++ b/plugins/inputs/puppetagent/puppetagent_test.go @@ -17,33 
+17,43 @@ func TestGather(t *testing.T) { tags := map[string]string{"location": "last_run_summary.yaml"} fields := map[string]interface{}{ - "events_failure": int64(0), - "events_total": int64(0), - "events_success": int64(0), - "resources_failed": int64(0), - "resources_scheduled": int64(0), - "resources_changed": int64(0), - "resources_skipped": int64(0), - "resources_total": int64(109), - "resources_failedtorestart": int64(0), - "resources_restarted": int64(0), - "resources_outofsync": int64(0), - "changes_total": int64(0), - "time_lastrun": int64(1444936531), - "version_configstring": "environment:d6018ce", - "time_user": float64(0.004331), - "time_schedule": float64(0.001123), - "time_filebucket": float64(0.000353), - "time_file": float64(0.441472), - "time_exec": float64(0.508123), - "time_anchor": float64(0.000555), - "time_sshauthorizedkey": float64(0.000764), - "time_service": float64(1.807795), - "time_package": float64(1.325788), - "time_total": float64(8.85354707064819), - "time_configretrieval": float64(4.75567007064819), - "time_cron": float64(0.000584), - "version_puppet": "3.7.5", + "events_failure": int64(0), + "events_noop": int64(0), + "events_success": int64(0), + "events_total": int64(0), + "resources_changed": int64(0), + "resources_correctivechange": int64(0), + "resources_failed": int64(0), + "resources_failedtorestart": int64(0), + "resources_outofsync": int64(0), + "resources_restarted": int64(0), + "resources_scheduled": int64(0), + "resources_skipped": int64(0), + "resources_total": int64(109), + "changes_total": int64(0), + "time_anchor": float64(0.000555), + "time_catalogapplication": float64(0.010555), + "time_configretrieval": float64(4.75567007064819), + "time_convertcatalog": float64(1.3), + "time_cron": float64(0.000584), + "time_exec": float64(0.508123), + "time_factgeneration": float64(0.34), + "time_file": float64(0.441472), + "time_filebucket": float64(0.000353), + "time_group": float64(0), + "time_lastrun": int64(1444936531), 
+ "time_noderetrieval": float64(1.235), + "time_notify": float64(0.00035), + "time_package": float64(1.325788), + "time_pluginsync": float64(0.325788), + "time_schedule": float64(0.001123), + "time_service": float64(1.807795), + "time_sshauthorizedkey": float64(0.000764), + "time_total": float64(8.85354707064819), + "time_transactionevaluation": float64(4.69765), + "time_user": float64(0.004331), + "version_configstring": "environment:d6018ce", + "version_puppet": "3.7.5", } acc.AssertContainsTaggedFields(t, "puppetagent", fields, tags) From 9e004623e03ba9f7003a36f0b5f9ffd259e7fafc Mon Sep 17 00:00:00 2001 From: Alan Pope Date: Tue, 21 Sep 2021 18:39:48 +0100 Subject: [PATCH 19/81] Update README.md Sorry, unbreaking the broken commit to master I did earlier. Won't do it again, promise! --- README.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index 5f0861f4fa3cb..9c75311e4e2cb 100644 --- a/README.md +++ b/README.md @@ -13,10 +13,10 @@ Design goal: Telegraf is plugin-driven and has the concept of 4 distinct plugin types: -1. [Input Plugins](#input-plugins) collect metrics from the system, services, or 3rd party APIs -2. [Processor Plugins](#processor-plugins) transform, decorate, and/or filter metrics -3. [Aggregator Plugins](#aggregator-plugins) create aggregate metrics (e.g. mean, min, max, quantiles, etc.) -4. [Output Plugins](#output-plugins) write metrics to various destinations +1. [Input Plugins](/docs/INPUTS.md) collect metrics from the system, services, or 3rd party APIs +2. [Processor Plugins](/docs/PROCESSORS.md) transform, decorate, and/or filter metrics +3. [Aggregator Plugins](/docs/AGGREGATORS.md) create aggregate metrics (e.g. mean, min, max, quantiles, etc.) +4. [Output Plugins](/docs/OUTPUTS.md) write metrics to various destinations New plugins are designed to be easy to contribute, pull requests are welcomed and we work to incorporate as many pull requests as possible. 
If none of the internal plugins fit your needs, you could have a look at the [list of external plugins](EXTERNAL_PLUGINS.md). @@ -117,14 +117,14 @@ telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb [Latest Release Documentation][release docs]. -For documentation on the latest development code see the [documentation index][devel docs]. +For documentation on the latest development code see the [documentation index](/docs). [release docs]: https://docs.influxdata.com/telegraf [developer docs]: docs -- [Input Plugins](/telegraf/docs/INPUTS.md) -- [Output Plugins](/telegraf/docs/OUTPUTS.md) -- [Processor Plugins](/telegraf/docs/PROCESSORS.md) -- [Aggregator Plugins](/telegraf/docs/AGGREGATORS.md) +- [Input Plugins](/docs/INPUTS.md) +- [Output Plugins](/docs/OUTPUTS.md) +- [Processor Plugins](/docs/PROCESSORS.md) +- [Aggregator Plugins](/docs/AGGREGATORS.md) ## Contributing @@ -135,4 +135,4 @@ There are many ways to contribute: - [Review code and feature proposals](https://github.com/influxdata/telegraf/pulls) - Answer questions and discuss here on github and on the [Community Site](https://community.influxdata.com/) - [Contribute plugins](CONTRIBUTING.md) -- [Contribute external plugins](docs/EXTERNAL_PLUGINS.md) \ No newline at end of file +- [Contribute external plugins](docs/EXTERNAL_PLUGINS.md) From c4c3c8ade982c7935f013bc93107dd9c702541f5 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Tue, 21 Sep 2021 12:03:41 -0700 Subject: [PATCH 20/81] docs: update caddy server instructions (#9698) --- plugins/inputs/prometheus/README.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/prometheus/README.md b/plugins/inputs/prometheus/README.md index c826fd0e015ab..955c6ab7d978b 100644 --- a/plugins/inputs/prometheus/README.md +++ b/plugins/inputs/prometheus/README.md @@ -158,20 +158,20 @@ Authorization header. 
### Usage for Caddy HTTP server -If you want to monitor Caddy, you need to use Caddy with its Prometheus plugin: +Steps to monitor Caddy with Telegraf's Prometheus input plugin: -* Download Caddy+Prometheus plugin [here](https://caddyserver.com/download/linux/amd64?plugins=http.prometheus) -* Add the `prometheus` directive in your `CaddyFile` +* Download [Caddy](https://caddyserver.com/download) +* Download Prometheus and set up [monitoring Caddy with Prometheus metrics](https://caddyserver.com/docs/metrics#monitoring-caddy-with-prometheus-metrics) * Restart Caddy * Configure Telegraf to fetch metrics on it: ```toml [[inputs.prometheus]] # ## An array of urls to scrape metrics from. - urls = ["http://localhost:9180/metrics"] + urls = ["http://localhost:2019/metrics"] ``` -> This is the default URL where Caddy Prometheus plugin will send data. +> This is the default URL where Caddy will send data. > For more details, please read the [Caddy Prometheus documentation](https://github.com/miekg/caddy-prometheus/blob/master/README.md). 
### Metrics: From 1c0b74eacded31b103a3467535166c39f6dffc7b Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 21 Sep 2021 12:19:45 -0700 Subject: [PATCH 21/81] chore: update nightly to package by arch (#9781) --- .circleci/config.yml | 156 ++++++++++++++++++++++++++----------------- Makefile | 12 +--- 2 files changed, 96 insertions(+), 72 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 1f644a7b9d20b..01a4bce06952e 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -109,9 +109,6 @@ commands: release: type: boolean default: false - nightly: - type: boolean - default: false type: type: string default: "" @@ -124,15 +121,9 @@ commands: condition: << parameters.release >> steps: - run: 'make package' - - when: - condition: << parameters.nightly >> - steps: - - run: 'make package' - - run: 'make upload-nightly' - unless: condition: or: - - << parameters.nightly >> - << parameters.release >> steps: - run: 'make package include_packages="$(make << parameters.type >>)"' @@ -269,8 +260,17 @@ jobs: nightly: executor: go-1_17 steps: - - package-build: - nightly: true + - attach_workspace: + at: '/build' + - run: + command: | + aws s3 sync /build/dist s3://dl.influxdata.com/telegraf/nightlies/ \ + --exclude "*" \ + --include "*.tar.gz" \ + --include "*.deb" \ + --include "*.rpm" \ + --include "*.zip" \ + --acl public-read package-consolidate: executor: name: win/default @@ -336,6 +336,63 @@ jobs: printf -v payload '{ "pullRequestNumber": "%s" }' "$PR" curl -X POST "https://182c7jdgog.execute-api.us-east-1.amazonaws.com/prod/shareArtifacts" --data "$payload" +commonjobs: + - &test-awaiter + 'test-awaiter': + requires: + - 'test-go-1_16' + - 'test-go-1_16-386' + - 'test-go-1_17' + - 'test-go-1_17-386' + - &windows-package + 'windows-package': + requires: + - 'test-go-windows' + - &darwin-package + 'darwin-package': + requires: + - 'test-go-mac' + - &i386-package + 'i386-package': + 
requires: + - 'test-awaiter' + - &ppc64le-package + 'ppc64le-package': + requires: + - 'test-awaiter' + - &s390x-package + 's390x-package': + requires: + - 'test-awaiter' + - &armel-package + 'armel-package': + requires: + - 'test-awaiter' + - &amd64-package + 'amd64-package': + requires: + - 'test-awaiter' + - &arm64-package + 'arm64-package': + requires: + - 'test-awaiter' + - &armhf-package + 'armhf-package': + requires: + - 'test-awaiter' + - &static-package + 'static-package': + requires: + - 'test-awaiter' + - &mipsel-package + 'mipsel-package': + requires: + - 'test-awaiter' + - &mips-package + 'mips-package': + requires: + - 'test-awaiter' + workflows: version: 2 check: @@ -376,48 +433,19 @@ workflows: filters: tags: only: /.*/ - - 'test-awaiter': - requires: - - 'test-go-1_16' - - 'test-go-1_16-386' - - 'test-go-1_17' - - 'test-go-1_17-386' - - 'windows-package': - requires: - - 'test-go-windows' - - 'darwin-package': - requires: - - 'test-go-mac' - - 'i386-package': - requires: - - 'test-awaiter' - - 'ppc64le-package': - requires: - - 'test-awaiter' - - 's390x-package': - requires: - - 'test-awaiter' - - 'armel-package': - requires: - - 'test-awaiter' - - 'amd64-package': - requires: - - 'test-awaiter' - - 'arm64-package': - requires: - - 'test-awaiter' - - 'armhf-package': - requires: - - 'test-awaiter' - - 'static-package': - requires: - - 'test-awaiter' - - 'mipsel-package': - requires: - - 'test-awaiter' - - 'mips-package': - requires: - - 'test-awaiter' + - *test-awaiter + - *windows-package + - *darwin-package + - *i386-package + - *ppc64le-package + - *s390x-package + - *armel-package + - *amd64-package + - *arm64-package + - *armhf-package + - *static-package + - *mipsel-package + - *mips-package - 'share-artifacts': requires: - 'i386-package' @@ -479,14 +507,20 @@ workflows: - 'deps' - 'test-go-mac' - 'test-go-windows' - - 'nightly': - requires: - - 'test-go-windows' - - 'test-go-mac' - - 'test-go-1_16' - - 'test-go-1_16-386' - - 'test-go-1_17' - 
- 'test-go-1_17-386' + - *test-awaiter + - *windows-package + - *darwin-package + - *i386-package + - *ppc64le-package + - *s390x-package + - *armel-package + - *amd64-package + - *arm64-package + - *armhf-package + - *static-package + - *mipsel-package + - *mips-package + - nightly triggers: - schedule: cron: "0 7 * * *" diff --git a/Makefile b/Makefile index a7797a0e8ce5f..cbe0e2a2e5dbb 100644 --- a/Makefile +++ b/Makefile @@ -203,7 +203,7 @@ plugin-%: ci-1.16: docker build -t quay.io/influxdb/telegraf-ci:1.16.7 - < scripts/ci-1.16.docker docker push quay.io/influxdb/telegraf-ci:1.16.7 - + .PHONY: ci-1.17 ci-1.17: docker build -t quay.io/influxdb/telegraf-ci:1.17.0 - < scripts/ci-1.17.docker @@ -346,16 +346,6 @@ $(include_packages): tar --owner 0 --group 0 -czvf $(pkgdir)/telegraf-$(tar_version)_$@ -C $(dir $(DESTDIR)) . ;\ fi -.PHONY: upload-nightly -upload-nightly: - aws s3 sync $(pkgdir) s3://dl.influxdata.com/telegraf/nightlies/ \ - --exclude "*" \ - --include "*.tar.gz" \ - --include "*.deb" \ - --include "*.rpm" \ - --include "*.zip" \ - --acl public-read - amd64.deb x86_64.rpm linux_amd64.tar.gz: export GOOS := linux amd64.deb x86_64.rpm linux_amd64.tar.gz: export GOARCH := amd64 From 027647e3edde77ce6a6c70956ea3a5bd20abf2d2 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 21 Sep 2021 12:53:18 -0700 Subject: [PATCH 22/81] chore: automate updating etc/telegraf.conf and etc/telegraf_windows.conf (#9684) --- .circleci/config.yml | 57 ++++++++++++++++++++++++++++++++++++++ scripts/generate_config.sh | 27 ++++++++++++++++++ scripts/update_config.sh | 22 +++++++++++++++ 3 files changed, 106 insertions(+) create mode 100755 scripts/generate_config.sh create mode 100755 scripts/update_config.sh diff --git a/.circleci/config.yml b/.circleci/config.yml index 01a4bce06952e..dc59d4aa13cdb 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -25,6 +25,23 @@ executors: GOFLAGS: -p=8 commands: + 
generate-config: + parameters: + os: + type: string + default: "linux" + steps: + - checkout + - attach_workspace: + at: '/build' + - run: ./scripts/generate_config.sh << parameters.os >> + - store_artifacts: + path: './new-config' + destination: 'new-config' + - persist_to_workspace: + root: './new-config' + paths: + - '*' check-changed-files-or-halt: steps: - run: ./scripts/check-file-changes.sh @@ -335,6 +352,24 @@ jobs: PR=${CIRCLE_PULL_REQUEST##*/} printf -v payload '{ "pullRequestNumber": "%s" }' "$PR" curl -X POST "https://182c7jdgog.execute-api.us-east-1.amazonaws.com/prod/shareArtifacts" --data "$payload" + generate-config: + executor: go-1_17 + steps: + - generate-config + generate-config-win: + executor: + name: win/default + shell: bash.exe + steps: + - generate-config: + os: windows + update-config: + executor: go-1_17 + steps: + - checkout + - attach_workspace: + at: '/new-config' + - run: ./scripts/update_config.sh ${UPDATE_CONFIG_TOKEN} commonjobs: - &test-awaiter @@ -446,6 +481,28 @@ workflows: - *static-package - *mipsel-package - *mips-package + - 'generate-config': + requires: + - 'amd64-package' + filters: + branches: + only: + - master + - 'generate-config-win': + requires: + - 'windows-package' + filters: + branches: + only: + - master + - 'update-config': + requires: + - 'generate-config-win' + - 'generate-config' + filters: + branches: + only: + - master - 'share-artifacts': requires: - 'i386-package' diff --git a/scripts/generate_config.sh b/scripts/generate_config.sh new file mode 100755 index 0000000000000..c85dd05172631 --- /dev/null +++ b/scripts/generate_config.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# This script is responsible for generating the Telegraf config found under the `etc` directory. +# This script is meant to be only ran in within the Circle CI pipeline so that the Tiger Bot can update them automatically. +# It supports Windows and Linux because the configs are different depending on the OS. 
+ + +os=$1 # windows or linux +exe_path="/build/extracted" # Path will contain telegraf binary +config_name="telegraf.conf" + +if [ "$os" = "windows" ]; then + zip=$(/bin/find ./build/dist -maxdepth 1 -name "*windows_amd64.zip" -print) + exe_path="$PWD/build/extracted" + unzip "$zip" -d "$exe_path" + config_name="telegraf_windows.conf" + exe_path=$(/bin/find "$exe_path" -name telegraf.exe -type f -print) +else + tar_path=$(find /build/dist -maxdepth 1 -name "*linux_amd64.tar.gz" -print | grep -v ".*static.*") + mkdir "$exe_path" + tar --extract --file="$tar_path" --directory "$exe_path" + exe_path=$(find "$exe_path" -name telegraf -type f -print | grep ".*usr/bin/.*") +fi + +$exe_path config > $config_name + +mkdir ./new-config +mv $config_name ./new-config diff --git a/scripts/update_config.sh b/scripts/update_config.sh new file mode 100755 index 0000000000000..87cfe2620ab61 --- /dev/null +++ b/scripts/update_config.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# This script is responsible for triggering the Tiger Bot endpoint that will create the pull request with the newly generated configs. +# This script is meant to be only ran in within the Circle CI pipeline. + +token=$1 + +config_path="/new-config" + +if [ ! -f "$config_path/telegraf.conf" ]; then + echo "$config_path/telegraf.conf does not exist" + exit +fi +if [ ! 
-f "$config_path/telegraf_windows.conf" ]; then + echo "$config_path/telegraf_windows.conf does not exist" + exit +fi + +if cmp -s "$config_path/telegraf.conf" "etc/telegraf.conf" && cmp -s "$config_path/telegraf_windows.conf" "etc/telegraf_windows.conf"; then + echo "Both telegraf.conf and telegraf_windows.conf haven't changed" +fi + +curl -H "Authorization: Bearer $token" -X POST "https://182c7jdgog.execute-api.us-east-1.amazonaws.com/prod/updateConfig" From a9898f179bbcbbfbc126b5f604c3357b990c80c3 Mon Sep 17 00:00:00 2001 From: alespour <42931850+alespour@users.noreply.github.com> Date: Tue, 21 Sep 2021 23:02:36 +0200 Subject: [PATCH 23/81] feat: add graylog plugin TCP support (#9644) --- etc/telegraf.conf | 4 +- plugins/outputs/graylog/README.md | 10 +- plugins/outputs/graylog/graylog.go | 181 +++++++++++++++++++----- plugins/outputs/graylog/graylog_test.go | 156 ++++++++++++++++++-- 4 files changed, 295 insertions(+), 56 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index beb22821464d9..0ed5ba8ebffba 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -762,8 +762,8 @@ # # Send telegraf metrics to graylog # [[outputs.graylog]] -# ## UDP endpoint for your graylog instance. -# servers = ["127.0.0.1:12201"] +# ## Endpoints for your graylog instances. +# servers = ["udp://127.0.0.1:12201"] # # ## The field to use as the GELF short_message, if unset the static string # ## "telegraf" will be used. diff --git a/plugins/outputs/graylog/README.md b/plugins/outputs/graylog/README.md index 4945ce46f84f0..6003122894f6d 100644 --- a/plugins/outputs/graylog/README.md +++ b/plugins/outputs/graylog/README.md @@ -8,11 +8,17 @@ This plugin writes to a Graylog instance using the "[GELF][]" format. ```toml [[outputs.graylog]] - ## UDP endpoint for your graylog instances. - servers = ["127.0.0.1:12201"] + ## Endpoints for your graylog instances. + servers = ["udp://127.0.0.1:12201"] + + ## Connection timeout. 
+ # timeout = "5s" ## The field to use as the GELF short_message, if unset the static string ## "telegraf" will be used. ## example: short_message_field = "message" # short_message_field = "" ``` + +Server endpoint may be specified without UDP or TCP scheme (eg. "127.0.0.1:12201"). +In such case, UDP protocol is assumed. diff --git a/plugins/outputs/graylog/graylog.go b/plugins/outputs/graylog/graylog.go index cf5dc6dc5ac3b..951273e2e7703 100644 --- a/plugins/outputs/graylog/graylog.go +++ b/plugins/outputs/graylog/graylog.go @@ -11,8 +11,11 @@ import ( "math" "net" "os" + "strings" + "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/outputs" ) @@ -21,45 +24,78 @@ const ( defaultConnection = "wan" defaultMaxChunkSizeWan = 1420 defaultMaxChunkSizeLan = 8154 + defaultScheme = "udp" + defaultTimeout = 5 * time.Second ) -type GelfConfig struct { +type gelfConfig struct { GraylogEndpoint string Connection string MaxChunkSizeWan int MaxChunkSizeLan int } -type Gelf struct { - GelfConfig +type gelf interface { + io.WriteCloser } -func NewGelfWriter(config GelfConfig) *Gelf { - if config.GraylogEndpoint == "" { - config.GraylogEndpoint = defaultGraylogEndpoint +type gelfCommon struct { + gelfConfig + dialer *net.Dialer + conn net.Conn +} + +type gelfUDP struct { + gelfCommon +} + +type gelfTCP struct { + gelfCommon +} + +func newGelfWriter(cfg gelfConfig, dialer *net.Dialer) gelf { + if cfg.GraylogEndpoint == "" { + cfg.GraylogEndpoint = defaultGraylogEndpoint } - if config.Connection == "" { - config.Connection = defaultConnection + if cfg.Connection == "" { + cfg.Connection = defaultConnection } - if config.MaxChunkSizeWan == 0 { - config.MaxChunkSizeWan = defaultMaxChunkSizeWan + if cfg.MaxChunkSizeWan == 0 { + cfg.MaxChunkSizeWan = defaultMaxChunkSizeWan } - if config.MaxChunkSizeLan == 0 { - config.MaxChunkSizeLan = defaultMaxChunkSizeLan + if cfg.MaxChunkSizeLan == 0 { + cfg.MaxChunkSizeLan = 
defaultMaxChunkSizeLan } - g := &Gelf{GelfConfig: config} + scheme := defaultScheme + parts := strings.SplitN(cfg.GraylogEndpoint, "://", 2) + if len(parts) == 2 { + scheme = strings.ToLower(parts[0]) + cfg.GraylogEndpoint = parts[1] + } + common := gelfCommon{ + gelfConfig: cfg, + dialer: dialer, + } + + var g gelf + switch scheme { + case "tcp": + g = &gelfTCP{gelfCommon: common} + default: + g = &gelfUDP{gelfCommon: common} + } return g } -func (g *Gelf) Write(message []byte) (n int, err error) { +func (g *gelfUDP) Write(message []byte) (n int, err error) { compressed := g.compress(message) - chunksize := g.GelfConfig.MaxChunkSizeWan + chunksize := g.gelfConfig.MaxChunkSizeWan length := compressed.Len() if length > chunksize { @@ -84,10 +120,19 @@ func (g *Gelf) Write(message []byte) (n int, err error) { n = len(message) - return + return n, nil +} + +func (g *gelfUDP) Close() (err error) { + if g.conn != nil { + err = g.conn.Close() + g.conn = nil + } + + return err } -func (g *Gelf) createChunkedMessage(index int, chunkCountInt int, id []byte, compressed *bytes.Buffer) bytes.Buffer { +func (g *gelfUDP) createChunkedMessage(index int, chunkCountInt int, id []byte, compressed *bytes.Buffer) bytes.Buffer { var packet bytes.Buffer chunksize := g.getChunksize() @@ -104,26 +149,26 @@ func (g *Gelf) createChunkedMessage(index int, chunkCountInt int, id []byte, com return packet } -func (g *Gelf) getChunksize() int { - if g.GelfConfig.Connection == "wan" { - return g.GelfConfig.MaxChunkSizeWan +func (g *gelfUDP) getChunksize() int { + if g.gelfConfig.Connection == "wan" { + return g.gelfConfig.MaxChunkSizeWan } - if g.GelfConfig.Connection == "lan" { - return g.GelfConfig.MaxChunkSizeLan + if g.gelfConfig.Connection == "lan" { + return g.gelfConfig.MaxChunkSizeLan } - return g.GelfConfig.MaxChunkSizeWan + return g.gelfConfig.MaxChunkSizeWan } -func (g *Gelf) intToBytes(i int) []byte { +func (g *gelfUDP) intToBytes(i int) []byte { buf := new(bytes.Buffer) 
binary.Write(buf, binary.LittleEndian, int8(i)) return buf.Bytes() } -func (g *Gelf) compress(b []byte) bytes.Buffer { +func (g *gelfUDP) compress(b []byte) bytes.Buffer { var buf bytes.Buffer comp := zlib.NewWriter(&buf) @@ -133,30 +178,83 @@ func (g *Gelf) compress(b []byte) bytes.Buffer { return buf } -func (g *Gelf) send(b []byte) error { - udpAddr, err := net.ResolveUDPAddr("udp", g.GelfConfig.GraylogEndpoint) +func (g *gelfUDP) send(b []byte) error { + if g.conn == nil { + conn, err := g.dialer.Dial("udp", g.gelfConfig.GraylogEndpoint) + if err != nil { + return err + } + g.conn = conn + } + + _, err := g.conn.Write(b) if err != nil { - return err + _ = g.conn.Close() + g.conn = nil } - conn, err := net.DialUDP("udp", nil, udpAddr) + return err +} + +func (g *gelfTCP) Write(message []byte) (n int, err error) { + err = g.send(message) if err != nil { - return err + return 0, err + } + + n = len(message) + + return n, nil +} + +func (g *gelfTCP) Close() (err error) { + if g.conn != nil { + err = g.conn.Close() + g.conn = nil + } + + return err +} + +func (g *gelfTCP) send(b []byte) error { + if g.conn == nil { + conn, err := g.dialer.Dial("tcp", g.gelfConfig.GraylogEndpoint) + if err != nil { + return err + } + g.conn = conn + } + + _, err := g.conn.Write(b) + if err != nil { + _ = g.conn.Close() + g.conn = nil + } else { + _, err = g.conn.Write([]byte{0}) // message delimiter + if err != nil { + _ = g.conn.Close() + g.conn = nil + } } - _, err = conn.Write(b) return err } type Graylog struct { - Servers []string `toml:"servers"` - ShortMessageField string `toml:"short_message_field"` - writer io.Writer + Servers []string `toml:"servers"` + ShortMessageField string `toml:"short_message_field"` + Timeout config.Duration `toml:"timeout"` + + writer io.Writer + closers []io.WriteCloser } var sampleConfig = ` - ## UDP endpoint for your graylog instance. - servers = ["127.0.0.1:12201"] + ## Endpoints for your graylog instances. 
+ servers = ["udp://127.0.0.1:12201"] + + ## Connection timeout. + # timeout = "5s" ## The field to use as the GELF short_message, if unset the static string ## "telegraf" will be used. @@ -166,14 +264,16 @@ var sampleConfig = ` func (g *Graylog) Connect() error { writers := []io.Writer{} + dialer := net.Dialer{Timeout: time.Duration(g.Timeout)} if len(g.Servers) == 0 { g.Servers = append(g.Servers, "localhost:12201") } for _, server := range g.Servers { - w := NewGelfWriter(GelfConfig{GraylogEndpoint: server}) + w := newGelfWriter(gelfConfig{GraylogEndpoint: server}, &dialer) writers = append(writers, w) + g.closers = append(g.closers, w) } g.writer = io.MultiWriter(writers...) @@ -181,6 +281,9 @@ func (g *Graylog) Connect() error { } func (g *Graylog) Close() error { + for _, closer := range g.closers { + _ = closer.Close() + } return nil } @@ -253,6 +356,8 @@ func (g *Graylog) serialize(metric telegraf.Metric) ([]string, error) { func init() { outputs.Add("graylog", func() telegraf.Output { - return &Graylog{} + return &Graylog{ + Timeout: config.Duration(defaultTimeout), + } }) } diff --git a/plugins/outputs/graylog/graylog_test.go b/plugins/outputs/graylog/graylog_test.go index 37816a7a2c4b3..faa5b34b908d7 100644 --- a/plugins/outputs/graylog/graylog_test.go +++ b/plugins/outputs/graylog/graylog_test.go @@ -11,9 +11,22 @@ import ( "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -func TestWrite(t *testing.T) { +func TestWriteDefault(t *testing.T) { + scenarioUDP(t, "127.0.0.1:12201") +} + +func TestWriteUDP(t *testing.T) { + scenarioUDP(t, "udp://127.0.0.1:12201") +} + +func TestWriteTCP(t *testing.T) { + scenarioTCP(t, "tcp://127.0.0.1:12201") +} + +func scenarioUDP(t *testing.T, server string) { var wg sync.WaitGroup var wg2 sync.WaitGroup wg.Add(1) @@ -22,13 +35,62 @@ func TestWrite(t *testing.T) { wg2.Wait() i := Graylog{ - Servers: []string{"127.0.0.1:12201"}, + Servers: 
[]string{server}, + } + err := i.Connect() + require.NoError(t, err) + + metrics := testutil.MockMetrics() + + // UDP scenario: + // 4 messages are send + + err = i.Write(metrics) + require.NoError(t, err) + err = i.Write(metrics) + require.NoError(t, err) + err = i.Write(metrics) + require.NoError(t, err) + err = i.Write(metrics) + require.NoError(t, err) + + wg.Wait() + i.Close() +} + +func scenarioTCP(t *testing.T, server string) { + var wg sync.WaitGroup + var wg2 sync.WaitGroup + var wg3 sync.WaitGroup + wg.Add(1) + wg2.Add(1) + wg3.Add(1) + go TCPServer(t, &wg, &wg2, &wg3) + wg2.Wait() + + i := Graylog{ + Servers: []string{server}, } - i.Connect() + err := i.Connect() + require.NoError(t, err) metrics := testutil.MockMetrics() - i.Write(metrics) + // TCP scenario: + // 4 messages are send + // -> connection gets broken after the 2nd message (server closes connection) + // -> the 3rd write ends with error + // -> in the 4th write connection is restored and write is successful + + err = i.Write(metrics) + require.NoError(t, err) + err = i.Write(metrics) + require.NoError(t, err) + wg3.Wait() + err = i.Write(metrics) + require.Error(t, err) + err = i.Write(metrics) + require.NoError(t, err) wg.Wait() i.Close() @@ -37,22 +99,88 @@ func TestWrite(t *testing.T) { type GelfObject map[string]interface{} func UDPServer(t *testing.T, wg *sync.WaitGroup, wg2 *sync.WaitGroup) { - serverAddr, _ := net.ResolveUDPAddr("udp", "127.0.0.1:12201") - udpServer, _ := net.ListenUDP("udp", serverAddr) + serverAddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:12201") + require.NoError(t, err) + udpServer, err := net.ListenUDP("udp", serverAddr) + require.NoError(t, err) + defer udpServer.Close() defer wg.Done() bufR := make([]byte, 1024) wg2.Done() - n, _, _ := udpServer.ReadFromUDP(bufR) - b := bytes.NewReader(bufR[0:n]) - r, _ := zlib.NewReader(b) + recv := func() { + n, _, err := udpServer.ReadFromUDP(bufR) + require.NoError(t, err) + + b := bytes.NewReader(bufR[0:n]) + r, _ := 
zlib.NewReader(b) + + bufW := bytes.NewBuffer(nil) + _, _ = io.Copy(bufW, r) + _ = r.Close() + + var obj GelfObject + _ = json.Unmarshal(bufW.Bytes(), &obj) + require.NoError(t, err) + assert.Equal(t, obj["_value"], float64(1)) + } + + // in UDP scenario all 4 messages are received + + recv() + recv() + recv() + recv() +} + +func TCPServer(t *testing.T, wg *sync.WaitGroup, wg2 *sync.WaitGroup, wg3 *sync.WaitGroup) { + serverAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:12201") + require.NoError(t, err) + tcpServer, err := net.ListenTCP("tcp", serverAddr) + require.NoError(t, err) + defer tcpServer.Close() + defer wg.Done() + bufR := make([]byte, 1) bufW := bytes.NewBuffer(nil) - io.Copy(bufW, r) - r.Close() + wg2.Done() + + accept := func() *net.TCPConn { + conn, err := tcpServer.AcceptTCP() + require.NoError(t, err) + _ = conn.SetLinger(0) + return conn + } + conn := accept() + defer conn.Close() + + recv := func() { + bufW.Reset() + for { + n, err := conn.Read(bufR) + require.NoError(t, err) + if n > 0 { + if bufR[0] == 0 { // message delimiter found + break + } + _, _ = bufW.Write(bufR) + } + } + + var obj GelfObject + err = json.Unmarshal(bufW.Bytes(), &obj) + require.NoError(t, err) + assert.Equal(t, obj["_value"], float64(1)) + } + + // in TCP scenario only 3 messages are received (1st, 2dn and 4th) due to connection break after the 2nd - var obj GelfObject - json.Unmarshal(bufW.Bytes(), &obj) - assert.Equal(t, obj["_value"], float64(1)) + recv() + recv() + _ = conn.Close() + wg3.Done() + conn = accept() + defer conn.Close() + recv() } From 3eebfd2f0fd4d1768936ae98f601c827f6a271a2 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Tue, 21 Sep 2021 15:51:43 -0600 Subject: [PATCH 24/81] feat: add Linux Volume Manager input plugin (#9771) --- config/config.go | 2 +- plugins/inputs/all/all.go | 1 + plugins/inputs/lvm/README.md | 77 +++++++++ plugins/inputs/lvm/lvm.go | 293 +++++++++++++++++++++++++++++++++ plugins/inputs/lvm/lvm_test.go | 211 
++++++++++++++++++++++++ 5 files changed, 583 insertions(+), 1 deletion(-) create mode 100644 plugins/inputs/lvm/README.md create mode 100644 plugins/inputs/lvm/lvm.go create mode 100644 plugins/inputs/lvm/lvm_test.go diff --git a/config/config.go b/config/config.go index b6eed9446162f..e64d893bc05ea 100644 --- a/config/config.go +++ b/config/config.go @@ -1571,7 +1571,7 @@ func (c *Config) missingTomlField(_ reflect.Type, key string) error { "grok_timezone", "grok_unique_timestamp", "influx_max_line_bytes", "influx_sort_fields", "influx_uint_support", "interval", "json_name_key", "json_query", "json_strict", "json_string_fields", "json_time_format", "json_time_key", "json_timestamp_format", "json_timestamp_units", "json_timezone", "json_v2", - "metric_batch_size", "metric_buffer_limit", "name_override", "name_prefix", + "lvm", "metric_batch_size", "metric_buffer_limit", "name_override", "name_prefix", "name_suffix", "namedrop", "namepass", "order", "pass", "period", "precision", "prefix", "prometheus_export_timestamp", "prometheus_sort_metrics", "prometheus_string_as_label", "separator", "splunkmetric_hec_routing", "splunkmetric_multimetric", "tag_keys", diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 60a52903ef079..690df0d3b0e46 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -100,6 +100,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/logparser" _ "github.com/influxdata/telegraf/plugins/inputs/logstash" _ "github.com/influxdata/telegraf/plugins/inputs/lustre2" + _ "github.com/influxdata/telegraf/plugins/inputs/lvm" _ "github.com/influxdata/telegraf/plugins/inputs/mailchimp" _ "github.com/influxdata/telegraf/plugins/inputs/marklogic" _ "github.com/influxdata/telegraf/plugins/inputs/mcrouter" diff --git a/plugins/inputs/lvm/README.md b/plugins/inputs/lvm/README.md new file mode 100644 index 0000000000000..c0ce1a2e6008a --- /dev/null +++ b/plugins/inputs/lvm/README.md @@ -0,0 +1,77 @@ +# LVM Input 
Plugin + +The Logical Volume Management (LVM) input plugin collects information about +physical volumes, volume groups, and logical volumes. + +### Configuration + +The `lvm` command requires elevated permissions. If the user has configured +sudo with the ability to run these commands, then set the `use_sudo` to true. + +```toml +# Read metrics about LVM physical volumes, volume groups, logical volumes. +[[inputs.lvm]] + ## Use sudo to run LVM commands + use_sudo = false +``` + +#### Using sudo + +If your account does not already have the ability to run commands +with passwordless sudo then updates to the sudoers file are required. Below +is an example to allow the requires LVM commands: + +First, use the `visudo` command to start editing the sudoers file. Then add +the following content, where `` is the username of the user that +needs this access: + +```text +Cmnd_Alias LVM = /usr/sbin/pvs *, /usr/sbin/vgs *, /usr/sbin/lvs * + ALL=(root) NOPASSWD: LVM +Defaults!LVM !logfile, !syslog, !pam_session +``` + +### Metrics + +Metrics are broken out by physical volume (pv), volume group (vg), and logical +volume (lv): + +- lvm_physical_vol + - tags + - path + - vol_group + - fields + - size + - free + - used + - used_percent +- lvm_vol_group + - tags + - name + - fields + - size + - free + - used_percent + - physical_volume_count + - logical_volume_count + - snapshot_count +- lvm_logical_vol + - tags + - name + - vol_group + - fields + - size + - data_percent + - meta_percent + +### Example Output + +The following example shows a system with the root partition on an LVM group +as well as with a Docker thin-provisioned LVM group on a second drive: + +> lvm_physical_vol,path=/dev/sda2,vol_group=vgroot free=0i,size=249510756352i,used=249510756352i,used_percent=100 1631823026000000000 +> lvm_physical_vol,path=/dev/sdb,vol_group=docker free=3858759680i,size=128316342272i,used=124457582592i,used_percent=96.99277612525741 1631823026000000000 +> lvm_vol_group,name=vgroot 
free=0i,logical_volume_count=1i,physical_volume_count=1i,size=249510756352i,snapshot_count=0i,used_percent=100 1631823026000000000 +> lvm_vol_group,name=docker free=3858759680i,logical_volume_count=1i,physical_volume_count=1i,size=128316342272i,snapshot_count=0i,used_percent=96.99277612525741 1631823026000000000 +> lvm_logical_vol,name=lvroot,vol_group=vgroot data_percent=0,metadata_percent=0,size=249510756352i 1631823026000000000 +> lvm_logical_vol,name=thinpool,vol_group=docker data_percent=0.36000001430511475,metadata_percent=1.3300000429153442,size=121899057152i 1631823026000000000 diff --git a/plugins/inputs/lvm/lvm.go b/plugins/inputs/lvm/lvm.go new file mode 100644 index 0000000000000..ce46af8a31c04 --- /dev/null +++ b/plugins/inputs/lvm/lvm.go @@ -0,0 +1,293 @@ +package lvm + +import ( + "encoding/json" + "fmt" + "os/exec" + "strconv" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" +) + +var ( + execCommand = exec.Command +) + +var sampleConfig = ` +## Use sudo to run LVM commands +use_sudo = false +` + +type LVM struct { + UseSudo bool `toml:"use_sudo"` +} + +func (lvm *LVM) Description() string { + return "Read metrics about LVM physical volumes, volume groups, logical volumes." 
+} + +func (lvm *LVM) SampleConfig() string { + return sampleConfig +} + +func (lvm *LVM) Init() error { + return nil +} + +func (lvm *LVM) Gather(acc telegraf.Accumulator) error { + if err := lvm.gatherPhysicalVolumes(acc); err != nil { + return err + } else if err := lvm.gatherVolumeGroups(acc); err != nil { + return err + } else if err := lvm.gatherLogicalVolumes(acc); err != nil { + return err + } + + return nil +} + +func (lvm *LVM) gatherPhysicalVolumes(acc telegraf.Accumulator) error { + pvsCmd := "/usr/sbin/pvs" + args := []string{ + "--reportformat", "json", "--units", "b", "--nosuffix", + "-o", "pv_name,vg_name,pv_size,pv_free,pv_used", + } + out, err := lvm.runCmd(pvsCmd, args) + if err != nil { + return err + } + + var report pvsReport + err = json.Unmarshal(out, &report) + if err != nil { + return fmt.Errorf("failed to unmarshal physical volume JSON: %s", err) + } + + if len(report.Report) > 0 { + for _, pv := range report.Report[0].Pv { + tags := map[string]string{ + "path": pv.Name, + "vol_group": pv.VolGroup, + } + + size, err := strconv.ParseUint(pv.Size, 10, 64) + if err != nil { + return err + } + + free, err := strconv.ParseUint(pv.Free, 10, 64) + if err != nil { + return err + } + + used, err := strconv.ParseUint(pv.Used, 10, 64) + if err != nil { + return err + } + + usedPercent := float64(used) / float64(size) * 100 + + fields := map[string]interface{}{ + "size": size, + "free": free, + "used": used, + "used_percent": usedPercent, + } + + acc.AddFields("lvm_physical_vol", fields, tags) + } + } + + return nil +} + +func (lvm *LVM) gatherVolumeGroups(acc telegraf.Accumulator) error { + cmd := "/usr/sbin/vgs" + args := []string{ + "--reportformat", "json", "--units", "b", "--nosuffix", + "-o", "vg_name,pv_count,lv_count,snap_count,vg_size,vg_free", + } + out, err := lvm.runCmd(cmd, args) + if err != nil { + return err + } + + var report vgsReport + err = json.Unmarshal(out, &report) + if err != nil { + return fmt.Errorf("failed to unmarshal vol 
group JSON: %s", err) + } + + if len(report.Report) > 0 { + for _, vg := range report.Report[0].Vg { + tags := map[string]string{ + "name": vg.Name, + } + + size, err := strconv.ParseUint(vg.Size, 10, 64) + if err != nil { + return err + } + + free, err := strconv.ParseUint(vg.Free, 10, 64) + if err != nil { + return err + } + + pvCount, err := strconv.ParseUint(vg.PvCount, 10, 64) + if err != nil { + return err + } + lvCount, err := strconv.ParseUint(vg.LvCount, 10, 64) + if err != nil { + return err + } + snapCount, err := strconv.ParseUint(vg.SnapCount, 10, 64) + if err != nil { + return err + } + + usedPercent := (float64(size) - float64(free)) / float64(size) * 100 + + fields := map[string]interface{}{ + "size": size, + "free": free, + "used_percent": usedPercent, + "physical_volume_count": pvCount, + "logical_volume_count": lvCount, + "snapshot_count": snapCount, + } + + acc.AddFields("lvm_vol_group", fields, tags) + } + } + + return nil +} + +func (lvm *LVM) gatherLogicalVolumes(acc telegraf.Accumulator) error { + cmd := "/usr/sbin/lvs" + args := []string{ + "--reportformat", "json", "--units", "b", "--nosuffix", + "-o", "lv_name,vg_name,lv_size,data_percent,metadata_percent", + } + out, err := lvm.runCmd(cmd, args) + if err != nil { + return err + } + + var report lvsReport + err = json.Unmarshal(out, &report) + if err != nil { + return fmt.Errorf("failed to unmarshal logical vol JSON: %s", err) + } + + if len(report.Report) > 0 { + for _, lv := range report.Report[0].Lv { + tags := map[string]string{ + "name": lv.Name, + "vol_group": lv.VolGroup, + } + + size, err := strconv.ParseUint(lv.Size, 10, 64) + if err != nil { + return err + } + + // Does not apply to all logical volumes, set default value + if lv.DataPercent == "" { + lv.DataPercent = "0.0" + } + dataPercent, err := strconv.ParseFloat(lv.DataPercent, 32) + if err != nil { + return err + } + + // Does not apply to all logical volumes, set default value + if lv.MetadataPercent == "" { + 
lv.MetadataPercent = "0.0" + } + metadataPercent, err := strconv.ParseFloat(lv.MetadataPercent, 32) + if err != nil { + return err + } + + fields := map[string]interface{}{ + "size": size, + "data_percent": dataPercent, + "metadata_percent": metadataPercent, + } + + acc.AddFields("lvm_logical_vol", fields, tags) + } + } + + return nil +} + +func (lvm *LVM) runCmd(cmd string, args []string) ([]byte, error) { + execCmd := execCommand(cmd, args...) + if lvm.UseSudo { + execCmd = execCommand("sudo", append([]string{"-n", cmd}, args...)...) + } + + out, err := internal.StdOutputTimeout(execCmd, 5*time.Second) + if err != nil { + return nil, fmt.Errorf( + "failed to run command %s: %s - %s", + strings.Join(execCmd.Args, " "), err, string(out), + ) + } + + return out, nil +} + +// Represents info about physical volume command, pvs, output +type pvsReport struct { + Report []struct { + Pv []struct { + Name string `json:"pv_name"` + VolGroup string `json:"vg_name"` + Size string `json:"pv_size"` + Free string `json:"pv_free"` + Used string `json:"pv_used"` + } `json:"pv"` + } `json:"report"` +} + +// Represents info about volume group command, vgs, output +type vgsReport struct { + Report []struct { + Vg []struct { + Name string `json:"vg_name"` + Size string `json:"vg_size"` + Free string `json:"vg_free"` + LvCount string `json:"lv_count"` + PvCount string `json:"pv_count"` + SnapCount string `json:"snap_count"` + } `json:"vg"` + } `json:"report"` +} + +// Represents info about logical volume command, lvs, output +type lvsReport struct { + Report []struct { + Lv []struct { + Name string `json:"lv_name"` + VolGroup string `json:"vg_name"` + Size string `json:"lv_size"` + DataPercent string `json:"data_percent"` + MetadataPercent string `json:"metadata_percent"` + } `json:"lv"` + } `json:"report"` +} + +func init() { + inputs.Add("lvm", func() telegraf.Input { + return &LVM{} + }) +} diff --git a/plugins/inputs/lvm/lvm_test.go b/plugins/inputs/lvm/lvm_test.go new file mode 
100644 index 0000000000000..c48eff5c039b1 --- /dev/null +++ b/plugins/inputs/lvm/lvm_test.go @@ -0,0 +1,211 @@ +package lvm + +import ( + "fmt" + "os" + "os/exec" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestGather(t *testing.T) { + var lvm LVM = LVM{UseSudo: false} + var acc testutil.Accumulator + + // overwriting exec commands with mock commands + execCommand = fakeExecCommand + err := lvm.Gather(&acc) + require.NoError(t, err) + + pvsTags := map[string]string{ + "path": "/dev/sdb", + "vol_group": "docker", + } + pvsFields := map[string]interface{}{ + "size": uint64(128316342272), + "free": uint64(3858759680), + "used": uint64(124457582592), + "used_percent": 96.99277612525741, + } + acc.AssertContainsTaggedFields(t, "lvm_physical_vol", pvsFields, pvsTags) + + vgsTags := map[string]string{ + "name": "docker", + } + vgsFields := map[string]interface{}{ + "size": uint64(128316342272), + "free": uint64(3858759680), + "used_percent": 96.99277612525741, + "physical_volume_count": uint64(1), + "logical_volume_count": uint64(1), + "snapshot_count": uint64(0), + } + acc.AssertContainsTaggedFields(t, "lvm_vol_group", vgsFields, vgsTags) + + lvsTags := map[string]string{ + "name": "thinpool", + "vol_group": "docker", + } + lvsFields := map[string]interface{}{ + "size": uint64(121899057152), + "data_percent": 0.36000001430511475, + "metadata_percent": 1.3300000429153442, + } + acc.AssertContainsTaggedFields(t, "lvm_logical_vol", lvsFields, lvsTags) +} + +// Used as a helper function that mock the exec.Command call +func fakeExecCommand(command string, args ...string) *exec.Cmd { + cs := []string{"-test.run=TestHelperProcess", "--", command} + cs = append(cs, args...) + cmd := exec.Command(os.Args[0], cs...) 
+ cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"} + return cmd +} + +// Used to mock exec.Command output +func TestHelperProcess(_ *testing.T) { + if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { + return + } + + mockPVSData := `{ + "report": [ + { + "pv": [ + {"pv_name":"/dev/sdb", "vg_name":"docker", "pv_size":"128316342272", "pv_free":"3858759680", "pv_used":"124457582592"} + ] + } + ] + } +` + + mockVGSData := `{ + "report": [ + { + "vg": [ + {"vg_name":"docker", "pv_count":"1", "lv_count":"1", "snap_count":"0", "vg_size":"128316342272", "vg_free":"3858759680"} + ] + } + ] + } +` + + mockLVSData := `{ + "report": [ + { + "lv": [ + {"lv_name":"thinpool", "vg_name":"docker", "lv_size":"121899057152", "data_percent":"0.36", "metadata_percent":"1.33"} + ] + } + ] + } +` + + // Previous arguments are tests stuff, that looks like : + // /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess -- + args := os.Args + cmd := args[3] + if cmd == "/usr/sbin/pvs" { + //nolint:errcheck,revive // test will fail anyway + fmt.Fprint(os.Stdout, mockPVSData) + } else if cmd == "/usr/sbin/vgs" { + //nolint:errcheck,revive // test will fail anyway + fmt.Fprint(os.Stdout, mockVGSData) + } else if cmd == "/usr/sbin/lvs" { + //nolint:errcheck,revive // test will fail anyway + fmt.Fprint(os.Stdout, mockLVSData) + } else { + //nolint:errcheck,revive // test will fail anyway + fmt.Fprint(os.Stdout, "command not found") + //nolint:revive // error code is important for this "test" + os.Exit(1) + } + //nolint:revive // error code is important for this "test" + os.Exit(0) +} + +// test when no lvm devices exist +func TestGatherNoLVM(t *testing.T) { + var noLVM LVM = LVM{UseSudo: false} + var acc testutil.Accumulator + + // overwriting exec commands with mock commands + execCommand = fakeExecCommandNoLVM + err := noLVM.Gather(&acc) + require.NoError(t, err) + + acc.AssertDoesNotContainMeasurement(t, "lvm_physical_vol") + acc.AssertDoesNotContainMeasurement(t, 
"lvm_vol_group") + acc.AssertDoesNotContainMeasurement(t, "lvm_logical_vol") +} + +// Used as a helper function that mock the exec.Command call +func fakeExecCommandNoLVM(command string, args ...string) *exec.Cmd { + cs := []string{"-test.run=TestHelperProcessNoLVM", "--", command} + cs = append(cs, args...) + cmd := exec.Command(os.Args[0], cs...) + cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"} + return cmd +} + +// Used to mock exec.Command output +func TestHelperProcessNoLVM(_ *testing.T) { + if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { + return + } + + mockPVSData := `{ + "report": [ + { + "pv": [ + ] + } + ] + } +` + + mockVGSData := `{ + "report": [ + { + "vg": [ + ] + } + ] + } +` + + mockLVSData := `{ + "report": [ + { + "lv": [ + ] + } + ] + } +` + + // Previous arguments are tests stuff, that looks like : + // /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess -- + args := os.Args + cmd := args[3] + if cmd == "/usr/sbin/pvs" { + //nolint:errcheck,revive // test will fail anyway + fmt.Fprint(os.Stdout, mockPVSData) + } else if cmd == "/usr/sbin/vgs" { + //nolint:errcheck,revive // test will fail anyway + fmt.Fprint(os.Stdout, mockVGSData) + } else if cmd == "/usr/sbin/lvs" { + //nolint:errcheck,revive // test will fail anyway + fmt.Fprint(os.Stdout, mockLVSData) + } else { + //nolint:errcheck,revive // test will fail anyway + fmt.Fprint(os.Stdout, "command not found") + //nolint:revive // error code is important for this "test" + os.Exit(1) + } + //nolint:revive // error code is important for this "test" + os.Exit(0) +} From 86a6c06955d6e3197ff39355293ec7cfb4c3d54d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 Sep 2021 08:00:13 -0600 Subject: [PATCH 25/81] fix: bump github.com/Azure/go-autorest/autorest/adal (#9791) --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index d56ac810d6026..8e7e48099934e 
100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest v0.11.18 - github.com/Azure/go-autorest/autorest/adal v0.9.15 + github.com/Azure/go-autorest/autorest/adal v0.9.16 github.com/Azure/go-autorest/autorest/azure/auth v0.5.8 github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect diff --git a/go.sum b/go.sum index c67071b110a16..e8ba0d9ef6cc6 100644 --- a/go.sum +++ b/go.sum @@ -111,6 +111,8 @@ github.com/Azure/go-autorest/autorest/adal v0.9.11/go.mod h1:nBKAnTomx8gDtl+3ZCJ github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/adal v0.9.15 h1:X+p2GF0GWyOiSmqohIaEeuNFNDY4I4EOlVuUQvFdWMk= github.com/Azure/go-autorest/autorest/adal v0.9.15/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= +github.com/Azure/go-autorest/autorest/adal v0.9.16 h1:P8An8Z9rH1ldbOLdFpxYorgOt2sywL9V24dAwWHPuGc= +github.com/Azure/go-autorest/autorest/adal v0.9.16/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= github.com/Azure/go-autorest/autorest/azure/auth v0.5.8 h1:TzPg6B6fTZ0G1zBf3T54aI7p3cAT6u//TOXGPmFMOXg= github.com/Azure/go-autorest/autorest/azure/auth v0.5.8/go.mod h1:kxyKZTSfKh8OVFWPAgOgQ/frrJgeYQJPyR5fLFmXko4= From 4cee2ca15d80ffe16bc5c861adfb7710f158b03a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 Sep 2021 08:20:37 -0600 Subject: [PATCH 26/81] fix: bump github.com/testcontainers/testcontainers-go from 0.11.0 to 0.11.1 (#9789) --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 
8e7e48099934e..a569c672a4eb4 100644 --- a/go.mod +++ b/go.mod @@ -82,7 +82,7 @@ require ( github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect github.com/dimchansky/utfbom v1.1.1 github.com/docker/distribution v2.7.1+incompatible // indirect - github.com/docker/docker v20.10.6+incompatible + github.com/docker/docker v20.10.7+incompatible github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.4.0 // indirect github.com/doclambda/protobufquery v0.0.0-20210317203640-88ffabe06a60 @@ -245,7 +245,7 @@ require ( github.com/stretchr/objx v0.2.0 // indirect github.com/stretchr/testify v1.7.0 github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 - github.com/testcontainers/testcontainers-go v0.11.0 + github.com/testcontainers/testcontainers-go v0.11.1 github.com/tidwall/gjson v1.8.0 github.com/tidwall/match v1.0.3 // indirect github.com/tidwall/pretty v1.1.0 // indirect diff --git a/go.sum b/go.sum index e8ba0d9ef6cc6..6cc78b3768f13 100644 --- a/go.sum +++ b/go.sum @@ -499,8 +499,8 @@ github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.6+incompatible h1:oXI3Vas8TI8Eu/EjH4srKHJBVqraSzJybhxY7Om9faQ= -github.com/docker/docker v20.10.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.7+incompatible h1:Z6O9Nhsjv+ayUEeI1IojKbYcsGdgYSNqxe1s2MYzUhQ= +github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0 
h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= @@ -1533,8 +1533,8 @@ github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 h1:Oj2e7Sae4XrOs github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod h1:qUzPVlSj2UgxJkVbH0ZwuuiR46U8RBMDT5KLY78Ifpw= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= -github.com/testcontainers/testcontainers-go v0.11.0 h1:HO5YOx2DYBHqcg4MzVWPj3FuHAv7USWVu94vCSsgiaM= -github.com/testcontainers/testcontainers-go v0.11.0/go.mod h1:HztBCODzuA+YpMXGK8amjO8j50jz2gcT0BOzSKUiYIs= +github.com/testcontainers/testcontainers-go v0.11.1 h1:FiYsB83LSGbiawoV8TpAZGfcCUbtaeeg1SXqEKUxh08= +github.com/testcontainers/testcontainers-go v0.11.1/go.mod h1:/V0UVq+1e7NWYoqTPog179clf0Qp9TOyp4EcXaEFQz8= github.com/tidwall/gjson v1.8.0 h1:Qt+orfosKn0rbNTZqHYDqBrmm3UDA4KRkv70fDzG+PQ= github.com/tidwall/gjson v1.8.0/go.mod h1:5/xDoumyyDNerp2U36lyolv46b3uF/9Bu6OfyQ9GImk= github.com/tidwall/match v1.0.3 h1:FQUVvBImDutD8wJLN6c5eMzWtjgONK9MwIBCOrUJKeE= From 20ed68c36088941ebd608ef7405567ec764f54da Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 22 Sep 2021 08:36:04 -0600 Subject: [PATCH 27/81] fix: run go mod tidy --- go.sum | 2 -- 1 file changed, 2 deletions(-) diff --git a/go.sum b/go.sum index 6cc78b3768f13..300b12d8d6a6c 100644 --- a/go.sum +++ b/go.sum @@ -109,8 +109,6 @@ github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35pe github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/adal v0.9.11/go.mod h1:nBKAnTomx8gDtl+3ZCJv2v0KACFHWTB2drffI1B68Pk= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= 
-github.com/Azure/go-autorest/autorest/adal v0.9.15 h1:X+p2GF0GWyOiSmqohIaEeuNFNDY4I4EOlVuUQvFdWMk= -github.com/Azure/go-autorest/autorest/adal v0.9.15/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= github.com/Azure/go-autorest/autorest/adal v0.9.16 h1:P8An8Z9rH1ldbOLdFpxYorgOt2sywL9V24dAwWHPuGc= github.com/Azure/go-autorest/autorest/adal v0.9.16/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= From 045adcb700ebc55761e5876a07de82f9317e4056 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Wed, 22 Sep 2021 08:54:59 -0700 Subject: [PATCH 28/81] fix: nightly upload requires package steps (#9795) --- .circleci/config.yml | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index dc59d4aa13cdb..027a529cb0385 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -577,7 +577,20 @@ workflows: - *static-package - *mipsel-package - *mips-package - - nightly + - nightly: + requires: + - 'i386-package' + - 'ppc64le-package' + - 's390x-package' + - 'armel-package' + - 'amd64-package' + - 'mipsel-package' + - 'mips-package' + - 'darwin-package' + - 'windows-package' + - 'static-package' + - 'arm64-package' + - 'armhf-package' triggers: - schedule: cron: "0 7 * * *" From ceae37d66ecaf949a5813847ad4b695e2a936c3e Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Thu, 23 Sep 2021 16:10:44 +0200 Subject: [PATCH 29/81] fix: Rename KNXListener to knx_listener (#9741) --- etc/telegraf.conf | 15 +++++++-------- plugins/inputs/knx_listener/README.md | 6 +++--- plugins/inputs/knx_listener/knx_listener.go | 6 ++++-- 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 0ed5ba8ebffba..2892d99142be5 100644 --- a/etc/telegraf.conf +++ 
b/etc/telegraf.conf @@ -5546,7 +5546,7 @@ # # timeout = "5ms" -# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver +# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver # [[inputs.opensmtpd]] # ## If running as a restricted user you can prepend sudo for additional access: # #use_sudo = false @@ -6802,7 +6802,7 @@ # # Listener capable of handling KNX bus messages provided through a KNX-IP Interface. -# [[inputs.KNXListener]] +# [[inputs.knx_listener]] # ## Type of KNX-IP interface. # ## Can be either "tunnel" or "router". # # service_type = "tunnel" @@ -6811,7 +6811,7 @@ # service_address = "localhost:3671" # # ## Measurement definition(s) -# # [[inputs.KNXListener.measurement]] +# # [[inputs.knx_listener.measurement]] # # ## Name of the measurement # # name = "temperature" # # ## Datapoint-Type (DPT) of the KNX messages @@ -6819,7 +6819,7 @@ # # ## List of Group-Addresses (GAs) assigned to the measurement # # addresses = ["5/5/1"] # -# # [[inputs.KNXListener.measurement]] +# # [[inputs.knx_listener.measurement]] # # name = "illumination" # # dpt = "9.004" # # addresses = ["5/5/3"] @@ -7667,7 +7667,7 @@ # ## This value is propagated to pqos tool. Interval format is defined by pqos itself. # ## If not provided or provided 0, will be set to 10 = 10x100ms = 1s. # # sampling_interval = "10" -# +# # ## Optionally specify the path to pqos executable. # ## If not provided, auto discovery will be performed. # # pqos_path = "/usr/local/bin/pqos" @@ -7675,12 +7675,12 @@ # ## Optionally specify if IPC and LLC_Misses metrics shouldn't be propagated. # ## If not provided, default value is false. # # shortened_metrics = false -# +# # ## Specify the list of groups of CPU core(s) to be provided as pqos input. # ## Mandatory if processes aren't set and forbidden if processes are specified. # ## e.g. 
["0-3", "4,5,6"] or ["1-3,4"] # # cores = ["0-3"] -# +# # ## Specify the list of processes for which Metrics will be collected. # ## Mandatory if cores aren't set and forbidden if cores are specified. # ## e.g. ["qemu", "pmd"] @@ -9099,4 +9099,3 @@ # [[inputs.zipkin]] # # path = "/api/v1/spans" # URL path for span data # # port = 9411 # Port on which Telegraf listens - diff --git a/plugins/inputs/knx_listener/README.md b/plugins/inputs/knx_listener/README.md index 7a06462ffbb3e..518dd5d7f3720 100644 --- a/plugins/inputs/knx_listener/README.md +++ b/plugins/inputs/knx_listener/README.md @@ -11,7 +11,7 @@ This is a sample config for the plugin. ```toml # Listener capable of handling KNX bus messages provided through a KNX-IP Interface. -[[inputs.KNXListener]] +[[inputs.knx_listener]] ## Type of KNX-IP interface. ## Can be either "tunnel" or "router". # service_type = "tunnel" @@ -20,7 +20,7 @@ This is a sample config for the plugin. service_address = "localhost:3671" ## Measurement definition(s) - # [[inputs.KNXListener.measurement]] + # [[inputs.knx_listener.measurement]] # ## Name of the measurement # name = "temperature" # ## Datapoint-Type (DPT) of the KNX messages @@ -28,7 +28,7 @@ This is a sample config for the plugin. 
# ## List of Group-Addresses (GAs) assigned to the measurement # addresses = ["5/5/1"] - # [[inputs.KNXListener.measurement]] + # [[inputs.knx_listener.measurement]] # name = "illumination" # dpt = "9.004" # addresses = ["5/5/3"] diff --git a/plugins/inputs/knx_listener/knx_listener.go b/plugins/inputs/knx_listener/knx_listener.go index 98f19e922f7ad..3896d649b4055 100644 --- a/plugins/inputs/knx_listener/knx_listener.go +++ b/plugins/inputs/knx_listener/knx_listener.go @@ -56,7 +56,7 @@ func (kl *KNXListener) SampleConfig() string { service_address = "localhost:3671" ## Measurement definition(s) - # [[inputs.KNXListener.measurement]] + # [[inputs.knx_listener.measurement]] # ## Name of the measurement # name = "temperature" # ## Datapoint-Type (DPT) of the KNX messages @@ -64,7 +64,7 @@ func (kl *KNXListener) SampleConfig() string { # ## List of Group-Addresses (GAs) assigned to the measurement # addresses = ["5/5/1"] - # [[inputs.KNXListener.measurement]] + # [[inputs.knx_listener.measurement]] # name = "illumination" # dpt = "9.004" # addresses = ["5/5/3"] @@ -195,5 +195,7 @@ func (kl *KNXListener) listen() { } func init() { + inputs.Add("knx_listener", func() telegraf.Input { return &KNXListener{ServiceType: "tunnel"} }) + // Register for backward compatibility inputs.Add("KNXListener", func() telegraf.Input { return &KNXListener{ServiceType: "tunnel"} }) } From fb088bd69c86c6628dfdc5a44f9e0d878587f6e3 Mon Sep 17 00:00:00 2001 From: Jacob Marble Date: Thu, 23 Sep 2021 09:05:29 -0700 Subject: [PATCH 30/81] fix: error returned to OpenTelemetry client (#9797) --- go.mod | 16 +++- go.sum | 27 +++++- plugins/inputs/opentelemetry/grpc_services.go | 2 +- plugins/inputs/opentelemetry/opentelemetry.go | 11 ++- .../opentelemetry/opentelemetry_test.go | 83 +++++++++++++++++++ 5 files changed, 131 insertions(+), 8 deletions(-) create mode 100644 plugins/inputs/opentelemetry/opentelemetry_test.go diff --git a/go.mod b/go.mod index a569c672a4eb4..dc8b762d1e6d1 100644 --- 
a/go.mod +++ b/go.mod @@ -61,7 +61,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/sso v1.1.5 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.2.2 // indirect github.com/aws/smithy-go v1.8.0 - github.com/benbjohnson/clock v1.0.3 + github.com/benbjohnson/clock v1.1.0 github.com/beorn7/perks v1.0.1 // indirect github.com/bitly/go-hostpool v0.1.0 // indirect github.com/bmatcuk/doublestar/v3 v3.0.0 @@ -271,6 +271,9 @@ require ( go.mongodb.org/mongo-driver v1.5.3 go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/collector/model v0.35.0 + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.23.0 + go.opentelemetry.io/otel/metric v0.23.0 + go.opentelemetry.io/otel/sdk/metric v0.23.0 go.starlark.net v0.0.0-20210406145628-7a1108eaa012 go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect @@ -329,6 +332,17 @@ require ( sigs.k8s.io/yaml v1.2.0 // indirect ) +require ( + github.com/cenkalti/backoff/v4 v4.1.1 // indirect + go.opentelemetry.io/otel v1.0.0-RC3 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.23.0 // indirect + go.opentelemetry.io/otel/internal/metric v0.23.0 // indirect + go.opentelemetry.io/otel/sdk v1.0.0-RC3 // indirect + go.opentelemetry.io/otel/sdk/export/metric v0.23.0 // indirect + go.opentelemetry.io/otel/trace v1.0.0-RC3 // indirect + go.opentelemetry.io/proto/otlp v0.9.0 // indirect +) + // replaced due to https://github.com/satori/go.uuid/issues/73 replace github.com/satori/go.uuid => github.com/gofrs/uuid v3.2.0+incompatible diff --git a/go.sum b/go.sum index 300b12d8d6a6c..4189b415723f0 100644 --- a/go.sum +++ b/go.sum @@ -291,8 +291,8 @@ github.com/aws/smithy-go v1.0.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB github.com/aws/smithy-go v1.3.1/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aws/smithy-go v1.8.0 h1:AEwwwXQZtUwP5Mz506FeXXrKBe0jA8gVM+1gEcSRooc= github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= 
-github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg= -github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -333,6 +333,8 @@ github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEe github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= +github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= @@ -1639,7 +1641,27 @@ go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/collector/model v0.35.0 h1:NpKjghiqlei4ecwjOYOMhD6tj4gY8yiWHPJmbFs/ArI= go.opentelemetry.io/collector/model v0.35.0/go.mod h1:+7YCSjJG+MqiIFjauzt7oM2qkqBsaJWh5hcsO4fwsAc= +go.opentelemetry.io/otel v1.0.0-RC3 h1:kvwiyEkiUT/JaadXzVLI/R1wDO934A7r3Bs2wEe6wqA= 
+go.opentelemetry.io/otel v1.0.0-RC3/go.mod h1:Ka5j3ua8tZs4Rkq4Ex3hwgBgOchyPVq5S6P2lz//nKQ= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.23.0 h1:vKIEsT6IJU0NYd+iZccjgCmk80zsa7dTiC2Bu7U1jz0= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.23.0/go.mod h1:pe9oOWRaZyapdajWCn64fnl76v3cmTEmNBgh7MkKvwE= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.23.0 h1:JSsJID+KU3G8wxynfHIlWaefOvYngDjnrmtHOGb1sb0= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.23.0/go.mod h1:aSP5oMNaAfOYq+sRydHANZ0vBYLyZR/3lR9pru9aPLk= +go.opentelemetry.io/otel/internal/metric v0.23.0 h1:mPfzm9Iqhw7G2nDBmUAjFTfPqLZPbOW2k7QI57ITbaI= +go.opentelemetry.io/otel/internal/metric v0.23.0/go.mod h1:z+RPiDJe30YnCrOhFGivwBS+DU1JU/PiLKkk4re2DNY= +go.opentelemetry.io/otel/metric v0.23.0 h1:mYCcDxi60P4T27/0jchIDFa1WHEfQeU3zH9UEMpnj2c= +go.opentelemetry.io/otel/metric v0.23.0/go.mod h1:G/Nn9InyNnIv7J6YVkQfpc0JCfKBNJaERBGw08nqmVQ= +go.opentelemetry.io/otel/sdk v1.0.0-RC3 h1:iRMkET+EmJUn5mW0hJzygBraXRmrUwzbOtNvTCh/oKs= +go.opentelemetry.io/otel/sdk v1.0.0-RC3/go.mod h1:78H6hyg2fka0NYT9fqGuFLvly2yCxiBXDJAgLKo/2Us= +go.opentelemetry.io/otel/sdk/export/metric v0.23.0 h1:7NeoKPPx6NdZBVHLEp/LY5Lq85Ff1WNZnuJkuRy+azw= +go.opentelemetry.io/otel/sdk/export/metric v0.23.0/go.mod h1:SuMiREmKVRIwFKq73zvGTvwFpxb/ZAYkMfyqMoOtDqs= +go.opentelemetry.io/otel/sdk/metric v0.23.0 h1:xlZhPbiue1+jjSFEth94q9QCmX8Q24mOtue9IAmlVyI= +go.opentelemetry.io/otel/sdk/metric v0.23.0/go.mod h1:wa0sKK13eeIFW+0OFjcC3S1i7FTRRiLAXe1kjBVbhwg= +go.opentelemetry.io/otel/trace v1.0.0-RC3 h1:9F0ayEvlxv8BmNmPbU005WK7hC+7KbOazCPZjNa1yME= +go.opentelemetry.io/otel/trace v1.0.0-RC3/go.mod h1:VUt2TUYd8S2/ZRX09ZDFZQwn2RqfMB5MzO17jBojGxo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.9.0 h1:C0g6TWmQYvjKRnljRULLWUVJGy8Uvu0NEL/5frY2/t4= +go.opentelemetry.io/proto/otlp v0.9.0/go.mod 
h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg= go.starlark.net v0.0.0-20210406145628-7a1108eaa012 h1:4RGobP/iq7S22H0Bb92OEt+M8/cfBQnW+T+a2MC0sQo= go.starlark.net v0.0.0-20210406145628-7a1108eaa012/go.mod h1:t3mmBBPzAVvK0L0n1drDmrQsJ8FoIx4INCqVMTr/Zo0= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -1944,6 +1966,7 @@ golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/plugins/inputs/opentelemetry/grpc_services.go b/plugins/inputs/opentelemetry/grpc_services.go index f5fa450fa8f65..1c805e2a23ff2 100644 --- a/plugins/inputs/opentelemetry/grpc_services.go +++ b/plugins/inputs/opentelemetry/grpc_services.go @@ -56,7 +56,7 @@ func newMetricsService(logger common.Logger, writer *writeToAccumulator, schema func (s *metricsService) Export(ctx context.Context, req pdata.Metrics) (otlpgrpc.MetricsResponse, error) { err := s.converter.WriteMetrics(ctx, req, s.writer) - return otlpgrpc.MetricsResponse{}, err + return otlpgrpc.NewMetricsResponse(), err } type logsService struct { diff --git a/plugins/inputs/opentelemetry/opentelemetry.go b/plugins/inputs/opentelemetry/opentelemetry.go index 2e6cbf9b8349a..85f32a7695efa 100644 --- 
a/plugins/inputs/opentelemetry/opentelemetry.go +++ b/plugins/inputs/opentelemetry/opentelemetry.go @@ -24,6 +24,7 @@ type OpenTelemetry struct { Log telegraf.Logger `toml:"-"` + listener net.Listener // overridden in tests grpcServer *grpc.Server wg sync.WaitGroup @@ -89,14 +90,16 @@ func (o *OpenTelemetry) Start(accumulator telegraf.Accumulator) error { otlpgrpc.RegisterMetricsServer(o.grpcServer, ms) otlpgrpc.RegisterLogsServer(o.grpcServer, newLogsService(logger, influxWriter)) - listener, err := net.Listen("tcp", o.ServiceAddress) - if err != nil { - return err + if o.listener == nil { + o.listener, err = net.Listen("tcp", o.ServiceAddress) + if err != nil { + return err + } } o.wg.Add(1) go func() { - if err := o.grpcServer.Serve(listener); err != nil { + if err := o.grpcServer.Serve(o.listener); err != nil { accumulator.AddError(fmt.Errorf("failed to stop OpenTelemetry gRPC service: %w", err)) } o.wg.Done() diff --git a/plugins/inputs/opentelemetry/opentelemetry_test.go b/plugins/inputs/opentelemetry/opentelemetry_test.go new file mode 100644 index 0000000000000..2de35bb06af50 --- /dev/null +++ b/plugins/inputs/opentelemetry/opentelemetry_test.go @@ -0,0 +1,83 @@ +package opentelemetry + +import ( + "context" + "net" + "testing" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/global" + controller "go.opentelemetry.io/otel/sdk/metric/controller/basic" + processor "go.opentelemetry.io/otel/sdk/metric/processor/basic" + "go.opentelemetry.io/otel/sdk/metric/selector/simple" + "google.golang.org/grpc" + "google.golang.org/grpc/test/bufconn" +) + +func TestOpenTelemetry(t *testing.T) { + mockListener := bufconn.Listen(1024 * 1024) + plugin := 
inputs.Inputs["opentelemetry"]().(*OpenTelemetry) + plugin.listener = mockListener + accumulator := new(testutil.Accumulator) + + err := plugin.Start(accumulator) + require.NoError(t, err) + t.Cleanup(plugin.Stop) + + metricExporter, err := otlpmetricgrpc.New(context.Background(), + otlpmetricgrpc.WithInsecure(), + otlpmetricgrpc.WithDialOption( + grpc.WithBlock(), + grpc.WithContextDialer(func(_ context.Context, _ string) (net.Conn, error) { + return mockListener.Dial() + })), + ) + require.NoError(t, err) + t.Cleanup(func() { _ = metricExporter.Shutdown(context.Background()) }) + + pusher := controller.New( + processor.New( + simple.NewWithExactDistribution(), + metricExporter, + ), + controller.WithExporter(metricExporter), + ) + + err = pusher.Start(context.Background()) + require.NoError(t, err) + t.Cleanup(func() { _ = pusher.Stop(context.Background()) }) + + global.SetMeterProvider(pusher.MeterProvider()) + + // write metrics + meter := global.Meter("library-name") + counter := metric.Must(meter).NewInt64Counter("measurement-counter") + meter.RecordBatch(context.Background(), nil, counter.Measurement(7)) + + err = pusher.Stop(context.Background()) + require.NoError(t, err) + + // Shutdown + + plugin.Stop() + + err = metricExporter.Shutdown(context.Background()) + require.NoError(t, err) + + // Check + + assert.Empty(t, accumulator.Errors) + + if assert.Len(t, accumulator.Metrics, 1) { + got := accumulator.Metrics[0] + assert.Equal(t, "measurement-counter", got.Measurement) + assert.Equal(t, telegraf.Counter, got.Type) + assert.Equal(t, "library-name", got.Tags["otel.library.name"]) + } +} From 3ec4c128caf32f89c18501299f894abac69420e9 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Thu, 23 Sep 2021 10:15:43 -0700 Subject: [PATCH 31/81] docs: add trig and twemproxy plugin readmes (#9801) --- plugins/inputs/trig/README.md | 28 ++++++++++++++++++++++++++++ plugins/inputs/twemproxy/README.md | 16 ++++++++++++++++ 2 
files changed, 44 insertions(+) create mode 100644 plugins/inputs/trig/README.md create mode 100644 plugins/inputs/twemproxy/README.md diff --git a/plugins/inputs/trig/README.md b/plugins/inputs/trig/README.md new file mode 100644 index 0000000000000..41ff8743e8cf3 --- /dev/null +++ b/plugins/inputs/trig/README.md @@ -0,0 +1,28 @@ +# Trig Input Plugin + +The `trig` plugin is for demonstration purposes and inserts sine and cosine + +### Configuration + +```toml +# Inserts sine and cosine waves for demonstration purposes +[[inputs.trig]] + ## Set the amplitude + amplitude = 10.0 +``` + +### Metrics + +- trig + - fields: + - cosine (float) + - sine (float) + + +### Example Output + +``` +trig,host=MBP15-SWANG.local cosine=10,sine=0 1632338680000000000 +trig,host=MBP15-SWANG.local sine=5.877852522924732,cosine=8.090169943749473 1632338690000000000 +trig,host=MBP15-SWANG.local sine=9.510565162951535,cosine=3.0901699437494745 1632338700000000000 +``` diff --git a/plugins/inputs/twemproxy/README.md b/plugins/inputs/twemproxy/README.md new file mode 100644 index 0000000000000..0c07e0aec4463 --- /dev/null +++ b/plugins/inputs/twemproxy/README.md @@ -0,0 +1,16 @@ +# Twemproxy Input Plugin + +The `twemproxy` plugin gathers statistics from [Twemproxy](https://github.com/twitter/twemproxy) servers. 
+ + +### Configuration + +```toml +# Read Twemproxy stats data +[[inputs.twemproxy]] + ## Twemproxy stats address and port (no scheme) + addr = "localhost:22222" + ## Monitor pool name + pools = ["redis_pool", "mc_pool"] +``` + From 3b94269f30de70079fbf9942cf9266882b359947 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Fri, 24 Sep 2021 09:09:52 -0600 Subject: [PATCH 32/81] fix: update golang-ci package (#9817) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index cbe0e2a2e5dbb..12267c04403bb 100644 --- a/Makefile +++ b/Makefile @@ -141,7 +141,7 @@ vet: .PHONY: lint-install lint-install: - go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.38.0 + go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.42.1 .PHONY: lint lint: From c4d2ad85f0dc4ee386f4b5975c33b9ef15f7cbf6 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Tue, 28 Sep 2021 16:09:20 +0200 Subject: [PATCH 33/81] feat: Allow user to select the source for the metric timestamp. (#9013) --- plugins/inputs/opcua/README.md | 6 ++++ plugins/inputs/opcua/opcua_client.go | 42 ++++++++++++++++++++++------ 2 files changed, 39 insertions(+), 9 deletions(-) diff --git a/plugins/inputs/opcua/README.md b/plugins/inputs/opcua/README.md index d6530c0839b18..f28981f7482ae 100644 --- a/plugins/inputs/opcua/README.md +++ b/plugins/inputs/opcua/README.md @@ -46,6 +46,12 @@ Plugin minimum tested version: 1.16 ## Password. Required for auth_method = "UserName" # password = "" # + ## Option to select the metric timestamp to use. 
Valid options are: + ## "gather" -- uses the time of receiving the data in telegraf + ## "server" -- uses the timestamp provided by the server + ## "source" -- uses the timestamp provided by the source + # timestamp = "gather" + # ## Node ID configuration ## name - field name to use in the output ## namespace - OPC UA namespace of the node (integer value 0 thru 3) diff --git a/plugins/inputs/opcua/opcua_client.go b/plugins/inputs/opcua/opcua_client.go index 8dec41eb343e3..d59adc453ba8b 100644 --- a/plugins/inputs/opcua/opcua_client.go +++ b/plugins/inputs/opcua/opcua_client.go @@ -12,6 +12,7 @@ import ( "github.com/gopcua/opcua/ua" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal/choice" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/selfstat" ) @@ -26,6 +27,7 @@ type OpcUA struct { PrivateKey string `toml:"private_key"` Username string `toml:"username"` Password string `toml:"password"` + Timestamp string `toml:"timestamp"` AuthMethod string `toml:"auth_method"` ConnectTimeout config.Duration `toml:"connect_timeout"` RequestTimeout config.Duration `toml:"request_timeout"` @@ -77,12 +79,12 @@ type GroupSettings struct { // OPCData type type OPCData struct { - TagName string - Value interface{} - Quality ua.StatusCode - TimeStamp string - Time string - DataType ua.TypeID + TagName string + Value interface{} + Quality ua.StatusCode + ServerTime time.Time + SourceTime time.Time + DataType ua.TypeID } // ConnectionState used for constants @@ -136,6 +138,12 @@ const sampleConfig = ` ## Password. Required for auth_method = "UserName" # password = "" # + ## Option to select the metric timestamp to use. 
Valid options are: + ## "gather" -- uses the time of receiving the data in telegraf + ## "server" -- uses the timestamp provided by the server + ## "source" -- uses the timestamp provided by the source + # timestamp = "gather" + # ## Node ID configuration ## name - field name to use in the output ## namespace - OPC UA namespace of the node (integer value 0 thru 3) @@ -188,7 +196,12 @@ func (o *OpcUA) SampleConfig() string { func (o *OpcUA) Init() error { o.state = Disconnected - err := o.validateEndpoint() + err := choice.Check(o.Timestamp, []string{"", "gather", "server", "source"}) + if err != nil { + return err + } + + err = o.validateEndpoint() if err != nil { return err } @@ -485,8 +498,9 @@ func (o *OpcUA) getData() error { o.nodeData[i].Value = d.Value.Value() o.nodeData[i].DataType = d.Value.Type() } - o.nodeData[i].TimeStamp = d.ServerTimestamp.String() - o.nodeData[i].Time = d.SourceTimestamp.String() + o.nodeData[i].Quality = d.Status + o.nodeData[i].ServerTime = d.ServerTimestamp + o.nodeData[i].SourceTime = d.SourceTimestamp } return nil } @@ -551,6 +565,15 @@ func (o *OpcUA) Gather(acc telegraf.Accumulator) error { fields[o.nodeData[i].TagName] = o.nodeData[i].Value fields["Quality"] = strings.TrimSpace(fmt.Sprint(o.nodeData[i].Quality)) acc.AddFields(n.metricName, fields, tags) + + switch o.Timestamp { + case "server": + acc.AddFields(n.metricName, fields, tags, o.nodeData[i].ServerTime) + case "source": + acc.AddFields(n.metricName, fields, tags, o.nodeData[i].SourceTime) + default: + acc.AddFields(n.metricName, fields, tags) + } } } return nil @@ -564,6 +587,7 @@ func init() { Endpoint: "opc.tcp://localhost:4840", SecurityPolicy: "auto", SecurityMode: "auto", + Timestamp: "gather", RequestTimeout: config.Duration(5 * time.Second), ConnectTimeout: config.Duration(10 * time.Second), Certificate: "/etc/telegraf/cert.pem", From 6a3b27126a26dd43d2a287b4c9d8831dca6bf687 Mon Sep 17 00:00:00 2001 From: Eng Zer Jun Date: Wed, 29 Sep 2021 05:16:32 +0800 
Subject: [PATCH 34/81] refactor: move from io/ioutil to io and os package (#9811) --- config/config.go | 6 ++-- internal/content_coding_test.go | 4 +-- internal/internal_test.go | 7 ++-- internal/process/process.go | 3 +- internal/rotate/file_writer_test.go | 35 +++++++++---------- logger/logger_test.go | 27 +++++++------- plugins/common/cookie/cookie.go | 5 ++- plugins/common/cookie/cookie_test.go | 4 +-- plugins/common/encoding/decoder_test.go | 4 +-- plugins/common/logrus/hook.go | 4 +-- plugins/common/shim/config.go | 3 +- plugins/common/shim/input_test.go | 5 +-- plugins/common/shim/processor_test.go | 5 +-- plugins/common/tls/config.go | 4 +-- plugins/inputs/activemq/activemq.go | 4 +-- plugins/inputs/aliyuncms/aliyuncms_test.go | 8 ++--- .../inputs/amd_rocm_smi/amd_rocm_smi_test.go | 4 +-- plugins/inputs/bcache/bcache.go | 5 ++- plugins/inputs/bcache/bcache_test.go | 19 +++++----- plugins/inputs/beat/beat_test.go | 6 ++-- plugins/inputs/bond/bond.go | 3 +- plugins/inputs/burrow/burrow_test.go | 3 +- plugins/inputs/cassandra/cassandra.go | 4 +-- plugins/inputs/cassandra/cassandra_test.go | 4 +-- plugins/inputs/ceph/ceph.go | 4 +-- plugins/inputs/ceph/ceph_test.go | 5 ++- plugins/inputs/cgroup/cgroup_linux.go | 3 +- plugins/inputs/clickhouse/clickhouse.go | 5 ++- .../inputs/cloud_pubsub_push/pubsub_push.go | 4 +-- plugins/inputs/conntrack/conntrack.go | 6 ++-- plugins/inputs/conntrack/conntrack_test.go | 17 +++++---- plugins/inputs/dcos/creds.go | 4 +-- plugins/inputs/dcos/dcos.go | 4 +-- .../directory_monitor/directory_monitor.go | 5 ++- .../directory_monitor_test.go | 11 +++--- plugins/inputs/diskio/diskio_linux_test.go | 3 +- plugins/inputs/docker/docker_test.go | 6 ++-- plugins/inputs/docker/docker_testdata.go | 6 ++-- plugins/inputs/ecs/client.go | 5 ++- plugins/inputs/ecs/client_test.go | 14 ++++---- plugins/inputs/elasticsearch/elasticsearch.go | 4 +-- .../elasticsearch/elasticsearch_test.go | 4 +-- plugins/inputs/execd/shim/goshim.go | 3 +- 
plugins/inputs/file/file.go | 4 +-- plugins/inputs/fluentd/fluentd.go | 4 +-- plugins/inputs/graylog/graylog.go | 4 +-- plugins/inputs/graylog/graylog_test.go | 4 +-- plugins/inputs/http/http.go | 8 ++--- plugins/inputs/http/http_test.go | 10 +++--- .../http_listener_v2/http_listener_v2.go | 8 ++--- .../http_listener_v2/http_listener_v2_test.go | 4 +-- plugins/inputs/http_response/http_response.go | 6 ++-- .../http_response/http_response_test.go | 4 +-- plugins/inputs/httpjson/httpjson.go | 4 +-- plugins/inputs/httpjson/httpjson_test.go | 6 ++-- .../influxdb_listener_test.go | 4 +-- .../influxdb_v2_listener.go | 4 +-- .../influxdb_v2_listener_test.go | 7 ++-- plugins/inputs/intel_powerstat/file.go | 5 ++- plugins/inputs/jolokia/jolokia.go | 4 +-- plugins/inputs/jolokia/jolokia_test.go | 4 +-- plugins/inputs/jolokia2/client.go | 4 +-- plugins/inputs/jolokia2/client_test.go | 6 ++-- plugins/inputs/kernel/kernel.go | 5 ++- plugins/inputs/kernel/kernel_test.go | 3 +- plugins/inputs/kernel_vmstat/kernel_vmstat.go | 3 +- .../kernel_vmstat/kernel_vmstat_test.go | 3 +- plugins/inputs/kibana/kibana.go | 3 +- plugins/inputs/kibana/kibana_test.go | 4 +-- .../kinesis_consumer/kinesis_consumer.go | 6 ++-- plugins/inputs/kube_inventory/kube_state.go | 4 +-- plugins/inputs/kubernetes/kubernetes.go | 4 +-- plugins/inputs/leofs/leofs_test.go | 3 +- .../inputs/linux_sysctl_fs/linux_sysctl_fs.go | 5 ++- .../linux_sysctl_fs/linux_sysctl_fs_test.go | 15 ++++---- plugins/inputs/logparser/logparser_test.go | 7 ++-- plugins/inputs/logstash/logstash.go | 3 +- plugins/inputs/lustre2/lustre2.go | 4 +-- plugins/inputs/lustre2/lustre2_test.go | 11 +++--- plugins/inputs/mailchimp/chimp_api.go | 5 ++- plugins/inputs/mdstat/mdstat.go | 3 +- plugins/inputs/mdstat/mdstat_test.go | 3 +- plugins/inputs/mesos/mesos.go | 4 +-- plugins/inputs/multifile/multifile.go | 4 +-- plugins/inputs/nats/nats.go | 4 +-- plugins/inputs/neptune_apex/neptune_apex.go | 4 +-- .../nginx_plus_api/nginx_plus_api_metrics.go 
| 4 +-- .../nginx_upstream_check.go | 3 +- plugins/inputs/nsq/nsq.go | 4 +-- plugins/inputs/nstat/nstat.go | 7 ++-- plugins/inputs/nvidia_smi/nvidia_smi_test.go | 4 +-- plugins/inputs/opcua/opcua_util.go | 3 +- plugins/inputs/passenger/passenger_test.go | 3 +- plugins/inputs/phpfpm/child.go | 5 ++- plugins/inputs/phpfpm/fcgi_test.go | 5 ++- plugins/inputs/postfix/postfix_test.go | 15 ++++---- .../postgresql_extensible.go | 4 +-- .../inputs/processes/processes_notwindows.go | 3 +- plugins/inputs/procstat/native_finder.go | 4 +-- plugins/inputs/procstat/pgrep.go | 4 +-- plugins/inputs/procstat/procstat.go | 3 +- plugins/inputs/procstat/procstat_test.go | 5 ++- plugins/inputs/prometheus/kubernetes.go | 4 +-- plugins/inputs/prometheus/prometheus.go | 6 ++-- plugins/inputs/proxmox/proxmox.go | 4 +-- plugins/inputs/puppetagent/puppetagent.go | 6 ++-- plugins/inputs/rabbitmq/rabbitmq.go | 4 +-- plugins/inputs/rabbitmq/rabbitmq_test.go | 6 ++-- plugins/inputs/ravendb/ravendb_test.go | 6 ++-- plugins/inputs/redfish/redfish.go | 4 +-- plugins/inputs/salesforce/salesforce.go | 5 ++- plugins/inputs/snmp_legacy/snmp_legacy.go | 4 +-- .../socket_listener/socket_listener_test.go | 7 ++-- plugins/inputs/sql/sql.go | 4 +-- plugins/inputs/suricata/suricata_test.go | 27 +++++++------- plugins/inputs/synproxy/synproxy_test.go | 3 +- plugins/inputs/syslog/nontransparent_test.go | 9 +++-- plugins/inputs/syslog/octetcounting_test.go | 9 +++-- plugins/inputs/syslog/rfc5426_test.go | 5 ++- plugins/inputs/syslog/syslog_test.go | 3 +- plugins/inputs/tail/tail_test.go | 13 ++++--- plugins/inputs/twemproxy/twemproxy.go | 4 +-- .../inputs/udp_listener/udp_listener_test.go | 8 ++--- .../webhooks/filestack/filestack_webhooks.go | 4 +-- .../inputs/webhooks/github/github_webhooks.go | 4 +-- .../webhooks/mandrill/mandrill_webhooks.go | 4 +-- .../webhooks/rollbar/rollbar_webhooks.go | 4 +-- plugins/inputs/wireless/wireless_linux.go | 3 +- plugins/inputs/x509_cert/x509_cert.go | 7 ++-- 
plugins/inputs/x509_cert/x509_cert_test.go | 12 +++---- plugins/inputs/zfs/zfs_linux_test.go | 21 ++++++----- .../cmd/thrift_serialize/thrift_serialize.go | 8 ++--- .../inputs/zipkin/codec/thrift/thrift_test.go | 4 +-- plugins/inputs/zipkin/handler.go | 4 +-- plugins/inputs/zipkin/handler_test.go | 7 ++-- plugins/inputs/zipkin/zipkin_test.go | 4 +-- .../outputs/azure_monitor/azure_monitor.go | 6 ++-- plugins/outputs/dynatrace/dynatrace.go | 4 +-- plugins/outputs/dynatrace/dynatrace_test.go | 18 +++++----- plugins/outputs/file/file_test.go | 7 ++-- plugins/outputs/health/health_test.go | 4 +-- plugins/outputs/http/http.go | 3 +- plugins/outputs/http/http_test.go | 4 +-- plugins/outputs/influxdb/http.go | 7 ++-- plugins/outputs/influxdb/http_test.go | 16 ++++----- plugins/outputs/influxdb_v2/http.go | 3 +- plugins/outputs/influxdb_v2/http_test.go | 4 +-- plugins/outputs/librato/librato.go | 4 +-- plugins/outputs/loki/loki_test.go | 9 ++--- plugins/outputs/opentsdb/opentsdb_http.go | 3 +- .../prometheus_client_v1_test.go | 8 ++--- .../prometheus_client_v2_test.go | 6 ++-- plugins/outputs/sensu/sensu.go | 3 +- plugins/outputs/sensu/sensu_test.go | 4 +-- .../socket_writer/socket_writer_test.go | 5 ++- plugins/outputs/sql/sql_test.go | 13 ++++--- plugins/outputs/sql/sqlite_test.go | 3 +- plugins/outputs/sumologic/sumologic_test.go | 3 +- plugins/outputs/warp10/warp10.go | 4 +-- .../yandex_cloud_monitoring.go | 6 ++-- plugins/parsers/json_v2/parser_test.go | 3 +- plugins/parsers/prometheus/parser_test.go | 4 +-- plugins/parsers/xpath/parser_test.go | 6 ++-- plugins/processors/starlark/starlark_test.go | 3 +- testutil/tls.go | 4 +-- 165 files changed, 456 insertions(+), 517 deletions(-) diff --git a/config/config.go b/config/config.go index e64d893bc05ea..4880da4832e5a 100644 --- a/config/config.go +++ b/config/config.go @@ -3,7 +3,7 @@ package config import ( "bytes" "fmt" - "io/ioutil" + "io" "log" "net/http" "net/url" @@ -933,7 +933,7 @@ func loadConfig(config string) 
([]byte, error) { } // If it isn't a https scheme, try it as a file - return ioutil.ReadFile(config) + return os.ReadFile(config) } func fetchConfig(u *url.URL) ([]byte, error) { @@ -964,7 +964,7 @@ func fetchConfig(u *url.URL) ([]byte, error) { return nil, fmt.Errorf("Retry %d of %d failed to retrieve remote config: %s", i, retries, resp.Status) } defer resp.Body.Close() - return ioutil.ReadAll(resp.Body) + return io.ReadAll(resp.Body) } return nil, nil diff --git a/internal/content_coding_test.go b/internal/content_coding_test.go index 85496df59c5b6..06235a63879a9 100644 --- a/internal/content_coding_test.go +++ b/internal/content_coding_test.go @@ -2,7 +2,7 @@ package internal import ( "bytes" - "io/ioutil" + "io" "testing" "github.com/stretchr/testify/require" @@ -68,7 +68,7 @@ func TestStreamIdentityDecode(t *testing.T) { dec, err := NewStreamContentDecoder("identity", &r) require.NoError(t, err) - data, err := ioutil.ReadAll(dec) + data, err := io.ReadAll(dec) require.NoError(t, err) require.Equal(t, []byte("howdy"), data) diff --git a/internal/internal_test.go b/internal/internal_test.go index 7cb56d5324f06..8dae73f562702 100644 --- a/internal/internal_test.go +++ b/internal/internal_test.go @@ -5,7 +5,6 @@ import ( "compress/gzip" "crypto/rand" "io" - "io/ioutil" "log" "os/exec" "regexp" @@ -182,7 +181,7 @@ func TestCompressWithGzip(t *testing.T) { assert.NoError(t, err) defer gzipReader.Close() - output, err := ioutil.ReadAll(gzipReader) + output, err := io.ReadAll(gzipReader) assert.NoError(t, err) assert.Equal(t, testData, string(output)) @@ -203,7 +202,7 @@ func TestCompressWithGzipEarlyClose(t *testing.T) { rc, err := CompressWithGzip(mr) assert.NoError(t, err) - n, err := io.CopyN(ioutil.Discard, rc, 10000) + n, err := io.CopyN(io.Discard, rc, 10000) assert.NoError(t, err) assert.Equal(t, int64(10000), n) @@ -211,7 +210,7 @@ func TestCompressWithGzipEarlyClose(t *testing.T) { err = rc.Close() assert.NoError(t, err) - n, err = io.CopyN(ioutil.Discard, 
rc, 10000) + n, err = io.CopyN(io.Discard, rc, 10000) assert.Error(t, io.EOF, err) assert.Equal(t, int64(0), n) diff --git a/internal/process/process.go b/internal/process/process.go index 6da98d211a43b..3bfc3bb7e44e6 100644 --- a/internal/process/process.go +++ b/internal/process/process.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os/exec" "sync" "sync/atomic" @@ -187,5 +186,5 @@ func isQuitting(ctx context.Context) bool { } func defaultReadPipe(r io.Reader) { - io.Copy(ioutil.Discard, r) + _, _ = io.Copy(io.Discard, r) } diff --git a/internal/rotate/file_writer_test.go b/internal/rotate/file_writer_test.go index ca29b9a2f45d6..2d249d74548e1 100644 --- a/internal/rotate/file_writer_test.go +++ b/internal/rotate/file_writer_test.go @@ -1,7 +1,6 @@ package rotate import ( - "io/ioutil" "os" "path/filepath" "testing" @@ -12,7 +11,7 @@ import ( ) func TestFileWriter_NoRotation(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationNo") + tempDir, err := os.MkdirTemp("", "RotationNo") require.NoError(t, err) writer, err := NewFileWriter(filepath.Join(tempDir, "test"), 0, 0, 0) require.NoError(t, err) @@ -22,12 +21,12 @@ func TestFileWriter_NoRotation(t *testing.T) { require.NoError(t, err) _, err = writer.Write([]byte("Hello World 2")) require.NoError(t, err) - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 1, len(files)) } func TestFileWriter_TimeRotation(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationTime") + tempDir, err := os.MkdirTemp("", "RotationTime") require.NoError(t, err) interval, _ := time.ParseDuration("1s") writer, err := NewFileWriter(filepath.Join(tempDir, "test"), interval, 0, -1) @@ -39,28 +38,28 @@ func TestFileWriter_TimeRotation(t *testing.T) { time.Sleep(1 * time.Second) _, err = writer.Write([]byte("Hello World 2")) require.NoError(t, err) - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 2, len(files)) } func 
TestFileWriter_ReopenTimeRotation(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationTime") + tempDir, err := os.MkdirTemp("", "RotationTime") require.NoError(t, err) interval, _ := time.ParseDuration("1s") filePath := filepath.Join(tempDir, "test.log") - err = ioutil.WriteFile(filePath, []byte("Hello World"), 0644) + err = os.WriteFile(filePath, []byte("Hello World"), 0644) time.Sleep(1 * time.Second) assert.NoError(t, err) writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), interval, 0, -1) require.NoError(t, err) defer func() { writer.Close(); os.RemoveAll(tempDir) }() - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 2, len(files)) } func TestFileWriter_SizeRotation(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationSize") + tempDir, err := os.MkdirTemp("", "RotationSize") require.NoError(t, err) maxSize := int64(9) writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, -1) @@ -71,16 +70,16 @@ func TestFileWriter_SizeRotation(t *testing.T) { require.NoError(t, err) _, err = writer.Write([]byte("World 2")) require.NoError(t, err) - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 2, len(files)) } func TestFileWriter_ReopenSizeRotation(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationSize") + tempDir, err := os.MkdirTemp("", "RotationSize") require.NoError(t, err) maxSize := int64(12) filePath := filepath.Join(tempDir, "test.log") - err = ioutil.WriteFile(filePath, []byte("Hello World"), 0644) + err = os.WriteFile(filePath, []byte("Hello World"), 0644) assert.NoError(t, err) writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, -1) require.NoError(t, err) @@ -88,12 +87,12 @@ func TestFileWriter_ReopenSizeRotation(t *testing.T) { _, err = writer.Write([]byte("Hello World Again")) require.NoError(t, err) - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 2, 
len(files)) } func TestFileWriter_DeleteArchives(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationDeleteArchives") + tempDir, err := os.MkdirTemp("", "RotationDeleteArchives") require.NoError(t, err) maxSize := int64(5) writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, 2) @@ -112,14 +111,14 @@ func TestFileWriter_DeleteArchives(t *testing.T) { _, err = writer.Write([]byte("Third file")) require.NoError(t, err) - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 3, len(files)) for _, tempFile := range files { var bytes []byte var err error path := filepath.Join(tempDir, tempFile.Name()) - if bytes, err = ioutil.ReadFile(path); err != nil { + if bytes, err = os.ReadFile(path); err != nil { t.Error(err.Error()) return } @@ -133,7 +132,7 @@ func TestFileWriter_DeleteArchives(t *testing.T) { } func TestFileWriter_CloseRotates(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationClose") + tempDir, err := os.MkdirTemp("", "RotationClose") require.NoError(t, err) defer os.RemoveAll(tempDir) maxSize := int64(9) @@ -142,7 +141,7 @@ func TestFileWriter_CloseRotates(t *testing.T) { writer.Close() - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 1, len(files)) assert.Regexp(t, "^test\\.[^\\.]+\\.log$", files[0].Name()) } diff --git a/logger/logger_test.go b/logger/logger_test.go index d2c699da52644..47af1d4591bff 100644 --- a/logger/logger_test.go +++ b/logger/logger_test.go @@ -3,7 +3,6 @@ package logger import ( "bytes" "io" - "io/ioutil" "log" "os" "path/filepath" @@ -15,7 +14,7 @@ import ( ) func TestWriteLogToFile(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") assert.NoError(t, err) defer func() { os.Remove(tmpfile.Name()) }() @@ -24,13 +23,13 @@ func TestWriteLogToFile(t *testing.T) { log.Printf("I! TEST") log.Printf("D! 
TEST") // <- should be ignored - f, err := ioutil.ReadFile(tmpfile.Name()) + f, err := os.ReadFile(tmpfile.Name()) assert.NoError(t, err) assert.Equal(t, f[19:], []byte("Z I! TEST\n")) } func TestDebugWriteLogToFile(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") assert.NoError(t, err) defer func() { os.Remove(tmpfile.Name()) }() config := createBasicLogConfig(tmpfile.Name()) @@ -38,13 +37,13 @@ func TestDebugWriteLogToFile(t *testing.T) { SetupLogging(config) log.Printf("D! TEST") - f, err := ioutil.ReadFile(tmpfile.Name()) + f, err := os.ReadFile(tmpfile.Name()) assert.NoError(t, err) assert.Equal(t, f[19:], []byte("Z D! TEST\n")) } func TestErrorWriteLogToFile(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") assert.NoError(t, err) defer func() { os.Remove(tmpfile.Name()) }() config := createBasicLogConfig(tmpfile.Name()) @@ -53,13 +52,13 @@ func TestErrorWriteLogToFile(t *testing.T) { log.Printf("E! TEST") log.Printf("I! TEST") // <- should be ignored - f, err := ioutil.ReadFile(tmpfile.Name()) + f, err := os.ReadFile(tmpfile.Name()) assert.NoError(t, err) assert.Equal(t, f[19:], []byte("Z E! TEST\n")) } func TestAddDefaultLogLevel(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") assert.NoError(t, err) defer func() { os.Remove(tmpfile.Name()) }() config := createBasicLogConfig(tmpfile.Name()) @@ -67,13 +66,13 @@ func TestAddDefaultLogLevel(t *testing.T) { SetupLogging(config) log.Printf("TEST") - f, err := ioutil.ReadFile(tmpfile.Name()) + f, err := os.ReadFile(tmpfile.Name()) assert.NoError(t, err) assert.Equal(t, f[19:], []byte("Z I! 
TEST\n")) } func TestWriteToTruncatedFile(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") assert.NoError(t, err) defer func() { os.Remove(tmpfile.Name()) }() config := createBasicLogConfig(tmpfile.Name()) @@ -81,7 +80,7 @@ func TestWriteToTruncatedFile(t *testing.T) { SetupLogging(config) log.Printf("TEST") - f, err := ioutil.ReadFile(tmpfile.Name()) + f, err := os.ReadFile(tmpfile.Name()) assert.NoError(t, err) assert.Equal(t, f[19:], []byte("Z I! TEST\n")) @@ -91,13 +90,13 @@ func TestWriteToTruncatedFile(t *testing.T) { log.Printf("SHOULD BE FIRST") - f, err = ioutil.ReadFile(tmpfile.Name()) + f, err = os.ReadFile(tmpfile.Name()) assert.NoError(t, err) assert.Equal(t, f[19:], []byte("Z I! SHOULD BE FIRST\n")) } func TestWriteToFileInRotation(t *testing.T) { - tempDir, err := ioutil.TempDir("", "LogRotation") + tempDir, err := os.MkdirTemp("", "LogRotation") require.NoError(t, err) cfg := createBasicLogConfig(filepath.Join(tempDir, "test.log")) cfg.LogTarget = LogTargetFile @@ -110,7 +109,7 @@ func TestWriteToFileInRotation(t *testing.T) { log.Printf("I! TEST 1") // Writes 31 bytes, will rotate log.Printf("I! 
TEST") // Writes 29 byes, no rotation expected - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 2, len(files)) } diff --git a/plugins/common/cookie/cookie.go b/plugins/common/cookie/cookie.go index e452a50a4b0a9..03fd97f95077f 100644 --- a/plugins/common/cookie/cookie.go +++ b/plugins/common/cookie/cookie.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "net/http" "net/http/cookiejar" "strings" @@ -78,7 +77,7 @@ func (c *CookieAuthConfig) authRenewal(ctx context.Context, ticker *clockutil.Ti func (c *CookieAuthConfig) auth() error { var body io.ReadCloser if c.Body != "" { - body = ioutil.NopCloser(strings.NewReader(c.Body)) + body = io.NopCloser(strings.NewReader(c.Body)) defer body.Close() } @@ -97,7 +96,7 @@ func (c *CookieAuthConfig) auth() error { } defer resp.Body.Close() - if _, err = io.Copy(ioutil.Discard, resp.Body); err != nil { + if _, err = io.Copy(io.Discard, resp.Body); err != nil { return err } diff --git a/plugins/common/cookie/cookie_test.go b/plugins/common/cookie/cookie_test.go index 99269c27cd339..b32ceb0059e8b 100644 --- a/plugins/common/cookie/cookie_test.go +++ b/plugins/common/cookie/cookie_test.go @@ -3,7 +3,7 @@ package cookie import ( "context" "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "sync/atomic" @@ -50,7 +50,7 @@ func newFakeServer(t *testing.T) fakeServer { case authEndpointNoCreds: authed() case authEndpointWithBody: - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) if !cmp.Equal([]byte(reqBody), body) { w.WriteHeader(http.StatusUnauthorized) diff --git a/plugins/common/encoding/decoder_test.go b/plugins/common/encoding/decoder_test.go index 87115318ad0ed..b8e19af9cea43 100644 --- a/plugins/common/encoding/decoder_test.go +++ b/plugins/common/encoding/decoder_test.go @@ -2,7 +2,7 @@ package encoding import ( "bytes" - "io/ioutil" + "io" "testing" "github.com/stretchr/testify/require" @@ -66,7 +66,7 @@ func 
TestDecoder(t *testing.T) { require.NoError(t, err) buf := bytes.NewBuffer(tt.input) r := decoder.Reader(buf) - actual, err := ioutil.ReadAll(r) + actual, err := io.ReadAll(r) if tt.expectedErr { require.Error(t, err) return diff --git a/plugins/common/logrus/hook.go b/plugins/common/logrus/hook.go index a7f99023be1ba..7451639a75423 100644 --- a/plugins/common/logrus/hook.go +++ b/plugins/common/logrus/hook.go @@ -1,7 +1,7 @@ package logrus import ( - "io/ioutil" + "io" "log" "strings" "sync" @@ -19,7 +19,7 @@ type LogHook struct { // that directly log to the logrus system without providing an override method. func InstallHook() { once.Do(func() { - logrus.SetOutput(ioutil.Discard) + logrus.SetOutput(io.Discard) logrus.AddHook(&LogHook{}) }) } diff --git a/plugins/common/shim/config.go b/plugins/common/shim/config.go index a0bb3ce0de696..089c2b7ee7525 100644 --- a/plugins/common/shim/config.go +++ b/plugins/common/shim/config.go @@ -3,7 +3,6 @@ package shim import ( "errors" "fmt" - "io/ioutil" "log" "os" @@ -53,7 +52,7 @@ func LoadConfig(filePath *string) (loaded loadedConfig, err error) { var data string conf := config{} if filePath != nil && *filePath != "" { - b, err := ioutil.ReadFile(*filePath) + b, err := os.ReadFile(*filePath) if err != nil { return loadedConfig{}, err } diff --git a/plugins/common/shim/input_test.go b/plugins/common/shim/input_test.go index 7cbfe6413975f..9a0423261ac14 100644 --- a/plugins/common/shim/input_test.go +++ b/plugins/common/shim/input_test.go @@ -3,7 +3,6 @@ package shim import ( "bufio" "io" - "io/ioutil" "strings" "testing" "time" @@ -45,7 +44,9 @@ func TestInputShimStdinSignalingWorks(t *testing.T) { require.Equal(t, "measurement,tag=tag field=1i 1234000005678\n", out) stdinWriter.Close() - go ioutil.ReadAll(r) + go func() { + _, _ = io.ReadAll(r) + }() // check that it exits cleanly <-exited } diff --git a/plugins/common/shim/processor_test.go b/plugins/common/shim/processor_test.go index ea2e61a459469..bc00fb70d1bba 100644 
--- a/plugins/common/shim/processor_test.go +++ b/plugins/common/shim/processor_test.go @@ -3,7 +3,6 @@ package shim import ( "bufio" "io" - "io/ioutil" "math/rand" "sync" "testing" @@ -84,7 +83,9 @@ func testSendAndRecieve(t *testing.T, fieldKey string, fieldValue string) { val2, ok := mOut.Fields()[fieldKey] require.True(t, ok) require.Equal(t, fieldValue, val2) - go ioutil.ReadAll(r) + go func() { + _, _ = io.ReadAll(r) + }() wg.Wait() } diff --git a/plugins/common/tls/config.go b/plugins/common/tls/config.go index 9a752fbce5714..586ec8fd4a417 100644 --- a/plugins/common/tls/config.go +++ b/plugins/common/tls/config.go @@ -4,7 +4,7 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" + "os" "strings" ) @@ -147,7 +147,7 @@ func (c *ServerConfig) TLSConfig() (*tls.Config, error) { func makeCertPool(certFiles []string) (*x509.CertPool, error) { pool := x509.NewCertPool() for _, certFile := range certFiles { - pem, err := ioutil.ReadFile(certFile) + pem, err := os.ReadFile(certFile) if err != nil { return nil, fmt.Errorf( "could not read certificate %q: %v", certFile, err) diff --git a/plugins/inputs/activemq/activemq.go b/plugins/inputs/activemq/activemq.go index 0674b7ae0fe52..f5cf7927342e5 100644 --- a/plugins/inputs/activemq/activemq.go +++ b/plugins/inputs/activemq/activemq.go @@ -3,7 +3,7 @@ package activemq import ( "encoding/xml" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "path" @@ -184,7 +184,7 @@ func (a *ActiveMQ) GetMetrics(u string) ([]byte, error) { return nil, fmt.Errorf("GET %s returned status %q", u, resp.Status) } - return ioutil.ReadAll(resp.Body) + return io.ReadAll(resp.Body) } func (a *ActiveMQ) GatherQueuesMetrics(acc telegraf.Accumulator, queues Queues) { diff --git a/plugins/inputs/aliyuncms/aliyuncms_test.go b/plugins/inputs/aliyuncms/aliyuncms_test.go index 22e0acbc52ebe..7e346a6ae9b8e 100644 --- a/plugins/inputs/aliyuncms/aliyuncms_test.go +++ b/plugins/inputs/aliyuncms/aliyuncms_test.go @@ -2,7 +2,7 @@ package aliyuncms import 
( "bytes" - "io/ioutil" + "io" "net/http" "testing" "time" @@ -132,7 +132,7 @@ func TestPluginInitialize(t *testing.T) { httpResp := &http.Response{ StatusCode: 200, - Body: ioutil.NopCloser(bytes.NewBufferString( + Body: io.NopCloser(bytes.NewBufferString( `{ "LoadBalancers": { @@ -359,7 +359,7 @@ func TestGetDiscoveryDataAcrossRegions(t *testing.T) { region: "cn-hongkong", httpResp: &http.Response{ StatusCode: 200, - Body: ioutil.NopCloser(bytes.NewBufferString(`{}`)), + Body: io.NopCloser(bytes.NewBufferString(`{}`)), }, totalCount: 0, pageSize: 0, @@ -372,7 +372,7 @@ func TestGetDiscoveryDataAcrossRegions(t *testing.T) { region: "cn-hongkong", httpResp: &http.Response{ StatusCode: 200, - Body: ioutil.NopCloser(bytes.NewBufferString( + Body: io.NopCloser(bytes.NewBufferString( `{ "LoadBalancers": { diff --git a/plugins/inputs/amd_rocm_smi/amd_rocm_smi_test.go b/plugins/inputs/amd_rocm_smi/amd_rocm_smi_test.go index 7893760bdf952..e38e0ff89eae0 100644 --- a/plugins/inputs/amd_rocm_smi/amd_rocm_smi_test.go +++ b/plugins/inputs/amd_rocm_smi/amd_rocm_smi_test.go @@ -1,7 +1,7 @@ package amd_rocm_smi import ( - "io/ioutil" + "os" "path/filepath" "testing" "time" @@ -78,7 +78,7 @@ func TestGatherValidJSON(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var acc testutil.Accumulator - octets, err := ioutil.ReadFile(filepath.Join("testdata", tt.filename)) + octets, err := os.ReadFile(filepath.Join("testdata", tt.filename)) require.NoError(t, err) err = gatherROCmSMI(octets, &acc) diff --git a/plugins/inputs/bcache/bcache.go b/plugins/inputs/bcache/bcache.go index 3195cf4dabcbb..84eb3262fdf28 100644 --- a/plugins/inputs/bcache/bcache.go +++ b/plugins/inputs/bcache/bcache.go @@ -8,7 +8,6 @@ package bcache import ( "errors" "fmt" - "io/ioutil" "os" "path/filepath" "strconv" @@ -85,7 +84,7 @@ func (b *Bcache) gatherBcache(bdev string, acc telegraf.Accumulator) error { if len(metrics) == 0 { return errors.New("can't read any stats file") } - 
file, err := ioutil.ReadFile(bdev + "/dirty_data") + file, err := os.ReadFile(bdev + "/dirty_data") if err != nil { return err } @@ -97,7 +96,7 @@ func (b *Bcache) gatherBcache(bdev string, acc telegraf.Accumulator) error { for _, path := range metrics { key := filepath.Base(path) - file, err := ioutil.ReadFile(path) + file, err := os.ReadFile(path) rawValue := strings.TrimSpace(string(file)) if err != nil { return err diff --git a/plugins/inputs/bcache/bcache_test.go b/plugins/inputs/bcache/bcache_test.go index 857538a8d6f72..4c62e0f014f14 100644 --- a/plugins/inputs/bcache/bcache_test.go +++ b/plugins/inputs/bcache/bcache_test.go @@ -4,7 +4,6 @@ package bcache import ( - "io/ioutil" "os" "testing" @@ -50,39 +49,39 @@ func TestBcacheGeneratesMetrics(t *testing.T) { err = os.MkdirAll(testBcacheUUIDPath+"/bdev0/stats_total", 0755) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/dirty_data", + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/dirty_data", []byte(dirtyData), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/bypassed", + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/bypassed", []byte(bypassed), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_bypass_hits", + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_bypass_hits", []byte(cacheBypassHits), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_bypass_misses", + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_bypass_misses", []byte(cacheBypassMisses), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_hit_ratio", + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_hit_ratio", []byte(cacheHitRatio), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_hits", + err = 
os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_hits", []byte(cacheHits), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_miss_collisions", + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_miss_collisions", []byte(cacheMissCollisions), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_misses", + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_misses", []byte(cacheMisses), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_readaheads", + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_readaheads", []byte(cacheReadaheads), 0644) require.NoError(t, err) diff --git a/plugins/inputs/beat/beat_test.go b/plugins/inputs/beat/beat_test.go index 8f2c5c9c2fbee..433e8fcd61337 100644 --- a/plugins/inputs/beat/beat_test.go +++ b/plugins/inputs/beat/beat_test.go @@ -2,11 +2,11 @@ package beat import ( "fmt" - "io/ioutil" "net" "net/http" "net/http/httptest" "net/url" + "os" "testing" "github.com/influxdata/telegraf/testutil" @@ -31,7 +31,7 @@ func Test_BeatStats(t *testing.T) { require.FailNow(t, "cannot handle request") } - data, err := ioutil.ReadFile(jsonFilePath) + data, err := os.ReadFile(jsonFilePath) require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) _, err = w.Write(data) require.NoError(t, err, "could not write data") @@ -175,7 +175,7 @@ func Test_BeatRequest(t *testing.T) { require.FailNow(t, "cannot handle request") } - data, err := ioutil.ReadFile(jsonFilePath) + data, err := os.ReadFile(jsonFilePath) require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) require.Equal(t, request.Host, "beat.test.local") require.Equal(t, request.Method, "POST") diff --git a/plugins/inputs/bond/bond.go b/plugins/inputs/bond/bond.go index dc9b083ec5af9..4f30a20e3f677 100644 --- a/plugins/inputs/bond/bond.go +++ 
b/plugins/inputs/bond/bond.go @@ -3,7 +3,6 @@ package bond import ( "bufio" "fmt" - "io/ioutil" "os" "path/filepath" "strconv" @@ -53,7 +52,7 @@ func (bond *Bond) Gather(acc telegraf.Accumulator) error { } for _, bondName := range bondNames { bondAbsPath := bond.HostProc + "/net/bonding/" + bondName - file, err := ioutil.ReadFile(bondAbsPath) + file, err := os.ReadFile(bondAbsPath) if err != nil { acc.AddError(fmt.Errorf("error inspecting '%s' interface: %v", bondAbsPath, err)) continue diff --git a/plugins/inputs/burrow/burrow_test.go b/plugins/inputs/burrow/burrow_test.go index d9df7be31d27e..db58df6fc94e8 100644 --- a/plugins/inputs/burrow/burrow_test.go +++ b/plugins/inputs/burrow/burrow_test.go @@ -2,7 +2,6 @@ package burrow import ( "fmt" - "io/ioutil" "net/http" "net/http/httptest" "os" @@ -28,7 +27,7 @@ func getResponseJSON(requestURI string) ([]byte, int) { } // respond with file - b, _ := ioutil.ReadFile(jsonFile) + b, _ := os.ReadFile(jsonFile) return b, code } diff --git a/plugins/inputs/cassandra/cassandra.go b/plugins/inputs/cassandra/cassandra.go index 4a52ef2979b7d..d1c23caadc68a 100644 --- a/plugins/inputs/cassandra/cassandra.go +++ b/plugins/inputs/cassandra/cassandra.go @@ -4,7 +4,7 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "strings" @@ -217,7 +217,7 @@ func (c *Cassandra) getAttr(requestURL *url.URL) (map[string]interface{}, error) } // read body - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } diff --git a/plugins/inputs/cassandra/cassandra_test.go b/plugins/inputs/cassandra/cassandra_test.go index 325c267d9274b..f167f50e7187f 100644 --- a/plugins/inputs/cassandra/cassandra_test.go +++ b/plugins/inputs/cassandra/cassandra_test.go @@ -2,7 +2,7 @@ package cassandra import ( _ "fmt" - "io/ioutil" + "io" "net/http" "strings" "testing" @@ -109,7 +109,7 @@ type jolokiaClientStub struct { func (c jolokiaClientStub) MakeRequest(_ *http.Request) 
(*http.Response, error) { resp := http.Response{} resp.StatusCode = c.statusCode - resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody)) + resp.Body = io.NopCloser(strings.NewReader(c.responseBody)) return &resp, nil } diff --git a/plugins/inputs/ceph/ceph.go b/plugins/inputs/ceph/ceph.go index 7baa28213ac7f..efd61d56322a7 100644 --- a/plugins/inputs/ceph/ceph.go +++ b/plugins/inputs/ceph/ceph.go @@ -4,7 +4,7 @@ import ( "bytes" "encoding/json" "fmt" - "io/ioutil" + "os" "os/exec" "path/filepath" "strings" @@ -206,7 +206,7 @@ var perfDump = func(binary string, socket *socket) (string, error) { } var findSockets = func(c *Ceph) ([]*socket, error) { - listing, err := ioutil.ReadDir(c.SocketDir) + listing, err := os.ReadDir(c.SocketDir) if err != nil { return []*socket{}, fmt.Errorf("Failed to read socket directory '%s': %v", c.SocketDir, err) } diff --git a/plugins/inputs/ceph/ceph_test.go b/plugins/inputs/ceph/ceph_test.go index a61838bc6a4e0..7915d6dd695f4 100644 --- a/plugins/inputs/ceph/ceph_test.go +++ b/plugins/inputs/ceph/ceph_test.go @@ -2,7 +2,6 @@ package ceph import ( "fmt" - "io/ioutil" "os" "path/filepath" "strconv" @@ -113,7 +112,7 @@ func TestGather(t *testing.T) { } func TestFindSockets(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "socktest") + tmpdir, err := os.MkdirTemp("", "socktest") require.NoError(t, err) defer func() { err := os.Remove(tmpdir) @@ -189,7 +188,7 @@ func createTestFiles(dir string, st *SockTest) error { writeFile := func(prefix string, i int) error { f := sockFile(prefix, i) fpath := filepath.Join(dir, f) - return ioutil.WriteFile(fpath, []byte(""), 0777) + return os.WriteFile(fpath, []byte(""), 0777) } return tstFileApply(st, writeFile) } diff --git a/plugins/inputs/cgroup/cgroup_linux.go b/plugins/inputs/cgroup/cgroup_linux.go index d1eda6e7a3b07..b892f528c234f 100644 --- a/plugins/inputs/cgroup/cgroup_linux.go +++ b/plugins/inputs/cgroup/cgroup_linux.go @@ -5,7 +5,6 @@ package cgroup import ( "fmt" - 
"io/ioutil" "os" "path" "path/filepath" @@ -46,7 +45,7 @@ func (g *CGroup) gatherDir(acc telegraf.Accumulator, dir string) error { return file.err } - raw, err := ioutil.ReadFile(file.path) + raw, err := os.ReadFile(file.path) if err != nil { return err } diff --git a/plugins/inputs/clickhouse/clickhouse.go b/plugins/inputs/clickhouse/clickhouse.go index e7c5991676211..bdd4cf4730fbc 100644 --- a/plugins/inputs/clickhouse/clickhouse.go +++ b/plugins/inputs/clickhouse/clickhouse.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net" "net/http" "net/url" @@ -590,7 +589,7 @@ func (ch *ClickHouse) execQuery(address *url.URL, query string, i interface{}) e } defer func() { _ = resp.Body.Close() }() if resp.StatusCode >= 300 { - body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(resp.Body, 200)) return &clickhouseError{ StatusCode: resp.StatusCode, body: body, @@ -606,7 +605,7 @@ func (ch *ClickHouse) execQuery(address *url.URL, query string, i interface{}) e return err } - if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil { + if _, err := io.Copy(io.Discard, resp.Body); err != nil { return err } return nil diff --git a/plugins/inputs/cloud_pubsub_push/pubsub_push.go b/plugins/inputs/cloud_pubsub_push/pubsub_push.go index ef43a3d5eb161..48329e1cd362e 100644 --- a/plugins/inputs/cloud_pubsub_push/pubsub_push.go +++ b/plugins/inputs/cloud_pubsub_push/pubsub_push.go @@ -5,7 +5,7 @@ import ( "crypto/subtle" "encoding/base64" "encoding/json" - "io/ioutil" + "io" "net/http" "sync" "time" @@ -222,7 +222,7 @@ func (p *PubSubPush) serveWrite(res http.ResponseWriter, req *http.Request) { } body := http.MaxBytesReader(res, req.Body, int64(p.MaxBodySize)) - bytes, err := ioutil.ReadAll(body) + bytes, err := io.ReadAll(body) if err != nil { res.WriteHeader(http.StatusRequestEntityTooLarge) return diff --git a/plugins/inputs/conntrack/conntrack.go b/plugins/inputs/conntrack/conntrack.go index 
f1b04fb0d965a..d644f7c188fc5 100644 --- a/plugins/inputs/conntrack/conntrack.go +++ b/plugins/inputs/conntrack/conntrack.go @@ -5,14 +5,14 @@ package conntrack import ( "fmt" - "io/ioutil" "os" "strconv" "strings" + "path/filepath" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" - "path/filepath" ) type Conntrack struct { @@ -91,7 +91,7 @@ func (c *Conntrack) Gather(acc telegraf.Accumulator) error { continue } - contents, err := ioutil.ReadFile(fName) + contents, err := os.ReadFile(fName) if err != nil { acc.AddError(fmt.Errorf("E! failed to read file '%s': %v", fName, err)) continue diff --git a/plugins/inputs/conntrack/conntrack_test.go b/plugins/inputs/conntrack/conntrack_test.go index 50f56d831791e..cb33caec2e330 100644 --- a/plugins/inputs/conntrack/conntrack_test.go +++ b/plugins/inputs/conntrack/conntrack_test.go @@ -4,7 +4,6 @@ package conntrack import ( - "io/ioutil" "os" "path" "strconv" @@ -35,11 +34,11 @@ func TestNoFilesFound(t *testing.T) { func TestDefaultsUsed(t *testing.T) { defer restoreDflts(dfltFiles, dfltDirs) - tmpdir, err := ioutil.TempDir("", "tmp1") + tmpdir, err := os.MkdirTemp("", "tmp1") require.NoError(t, err) defer os.Remove(tmpdir) - tmpFile, err := ioutil.TempFile(tmpdir, "ip_conntrack_count") + tmpFile, err := os.CreateTemp(tmpdir, "ip_conntrack_count") require.NoError(t, err) defer os.Remove(tmpFile.Name()) @@ -48,7 +47,7 @@ func TestDefaultsUsed(t *testing.T) { dfltFiles = []string{fname} count := 1234321 - require.NoError(t, ioutil.WriteFile(tmpFile.Name(), []byte(strconv.Itoa(count)), 0660)) + require.NoError(t, os.WriteFile(tmpFile.Name(), []byte(strconv.Itoa(count)), 0660)) c := &Conntrack{} acc := &testutil.Accumulator{} @@ -59,13 +58,13 @@ func TestDefaultsUsed(t *testing.T) { func TestConfigsUsed(t *testing.T) { defer restoreDflts(dfltFiles, dfltDirs) - tmpdir, err := ioutil.TempDir("", "tmp1") + tmpdir, err := os.MkdirTemp("", "tmp1") require.NoError(t, err) defer os.Remove(tmpdir) - 
cntFile, err := ioutil.TempFile(tmpdir, "nf_conntrack_count") + cntFile, err := os.CreateTemp(tmpdir, "nf_conntrack_count") require.NoError(t, err) - maxFile, err := ioutil.TempFile(tmpdir, "nf_conntrack_max") + maxFile, err := os.CreateTemp(tmpdir, "nf_conntrack_max") require.NoError(t, err) defer os.Remove(cntFile.Name()) defer os.Remove(maxFile.Name()) @@ -77,8 +76,8 @@ func TestConfigsUsed(t *testing.T) { count := 1234321 max := 9999999 - require.NoError(t, ioutil.WriteFile(cntFile.Name(), []byte(strconv.Itoa(count)), 0660)) - require.NoError(t, ioutil.WriteFile(maxFile.Name(), []byte(strconv.Itoa(max)), 0660)) + require.NoError(t, os.WriteFile(cntFile.Name(), []byte(strconv.Itoa(count)), 0660)) + require.NoError(t, os.WriteFile(maxFile.Name(), []byte(strconv.Itoa(max)), 0660)) c := &Conntrack{} acc := &testutil.Accumulator{} diff --git a/plugins/inputs/dcos/creds.go b/plugins/inputs/dcos/creds.go index 2fd5f078e46e5..328ce394a4cf6 100644 --- a/plugins/inputs/dcos/creds.go +++ b/plugins/inputs/dcos/creds.go @@ -4,7 +4,7 @@ import ( "context" "crypto/rsa" "fmt" - "io/ioutil" + "os" "strings" "time" "unicode/utf8" @@ -48,7 +48,7 @@ func (c *ServiceAccount) IsExpired() bool { } func (c *TokenCreds) Token(_ context.Context, _ Client) (string, error) { - octets, err := ioutil.ReadFile(c.Path) + octets, err := os.ReadFile(c.Path) if err != nil { return "", fmt.Errorf("error reading token file %q: %s", c.Path, err) } diff --git a/plugins/inputs/dcos/dcos.go b/plugins/inputs/dcos/dcos.go index 35822f30b074f..dd8f22f7292f5 100644 --- a/plugins/inputs/dcos/dcos.go +++ b/plugins/inputs/dcos/dcos.go @@ -2,8 +2,8 @@ package dcos import ( "context" - "io/ioutil" "net/url" + "os" "sort" "strings" "sync" @@ -370,7 +370,7 @@ func (d *DCOS) createClient() (Client, error) { func (d *DCOS) createCredentials() (Credentials, error) { if d.ServiceAccountID != "" && d.ServiceAccountPrivateKey != "" { - bs, err := ioutil.ReadFile(d.ServiceAccountPrivateKey) + bs, err := 
os.ReadFile(d.ServiceAccountPrivateKey) if err != nil { return nil, err } diff --git a/plugins/inputs/directory_monitor/directory_monitor.go b/plugins/inputs/directory_monitor/directory_monitor.go index 45acd1c062ba9..a58c039422757 100644 --- a/plugins/inputs/directory_monitor/directory_monitor.go +++ b/plugins/inputs/directory_monitor/directory_monitor.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "path/filepath" "regexp" @@ -108,7 +107,7 @@ func (monitor *DirectoryMonitor) Description() string { func (monitor *DirectoryMonitor) Gather(_ telegraf.Accumulator) error { // Get all files sitting in the directory. - files, err := ioutil.ReadDir(monitor.Directory) + files, err := os.ReadDir(monitor.Directory) if err != nil { return fmt.Errorf("unable to monitor the targeted directory: %w", err) } @@ -183,7 +182,7 @@ func (monitor *DirectoryMonitor) Monitor() { } } -func (monitor *DirectoryMonitor) processFile(file os.FileInfo) { +func (monitor *DirectoryMonitor) processFile(file os.DirEntry) { if file.IsDir() { return } diff --git a/plugins/inputs/directory_monitor/directory_monitor_test.go b/plugins/inputs/directory_monitor/directory_monitor_test.go index 2ad504637c6c2..7cda5f2d7b639 100644 --- a/plugins/inputs/directory_monitor/directory_monitor_test.go +++ b/plugins/inputs/directory_monitor/directory_monitor_test.go @@ -3,7 +3,6 @@ package directory_monitor import ( "bytes" "compress/gzip" - "io/ioutil" "os" "path/filepath" "testing" @@ -20,9 +19,9 @@ func TestCSVGZImport(t *testing.T) { testCsvGzFile := "test.csv.gz" // Establish process directory and finished directory. 
- finishedDirectory, err := ioutil.TempDir("", "finished") + finishedDirectory, err := os.MkdirTemp("", "finished") require.NoError(t, err) - processDirectory, err := ioutil.TempDir("", "test") + processDirectory, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(processDirectory) defer os.RemoveAll(finishedDirectory) @@ -62,7 +61,7 @@ func TestCSVGZImport(t *testing.T) { require.NoError(t, err) err = w.Close() require.NoError(t, err) - err = ioutil.WriteFile(filepath.Join(processDirectory, testCsvGzFile), b.Bytes(), 0666) + err = os.WriteFile(filepath.Join(processDirectory, testCsvGzFile), b.Bytes(), 0666) require.NoError(t, err) // Start plugin before adding file. @@ -89,9 +88,9 @@ func TestMultipleJSONFileImports(t *testing.T) { testJSONFile := "test.json" // Establish process directory and finished directory. - finishedDirectory, err := ioutil.TempDir("", "finished") + finishedDirectory, err := os.MkdirTemp("", "finished") require.NoError(t, err) - processDirectory, err := ioutil.TempDir("", "test") + processDirectory, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(processDirectory) defer os.RemoveAll(finishedDirectory) diff --git a/plugins/inputs/diskio/diskio_linux_test.go b/plugins/inputs/diskio/diskio_linux_test.go index 1a97aabf40db5..8a76e230cbb98 100644 --- a/plugins/inputs/diskio/diskio_linux_test.go +++ b/plugins/inputs/diskio/diskio_linux_test.go @@ -4,7 +4,6 @@ package diskio import ( - "io/ioutil" "os" "testing" @@ -20,7 +19,7 @@ S:foo/bar/devlink1 // setupNullDisk sets up fake udev info as if /dev/null were a disk. 
func setupNullDisk(t *testing.T, s *DiskIO, devName string) func() { - td, err := ioutil.TempFile("", ".telegraf.DiskInfoTest") + td, err := os.CreateTemp("", ".telegraf.DiskInfoTest") require.NoError(t, err) if s.infoCache == nil { diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index 599adae409e99..a84a6047b30aa 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -3,7 +3,7 @@ package docker import ( "context" "crypto/tls" - "io/ioutil" + "io" "reflect" "sort" "strings" @@ -1060,7 +1060,7 @@ func TestContainerName(t *testing.T) { } client.ContainerStatsF = func(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) { return types.ContainerStats{ - Body: ioutil.NopCloser(strings.NewReader(`{"name": "logspout"}`)), + Body: io.NopCloser(strings.NewReader(`{"name": "logspout"}`)), }, nil } return &client, nil @@ -1080,7 +1080,7 @@ func TestContainerName(t *testing.T) { } client.ContainerStatsF = func(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) { return types.ContainerStats{ - Body: ioutil.NopCloser(strings.NewReader(`{}`)), + Body: io.NopCloser(strings.NewReader(`{}`)), }, nil } return &client, nil diff --git a/plugins/inputs/docker/docker_testdata.go b/plugins/inputs/docker/docker_testdata.go index bde0bd312c788..826f34f6703d4 100644 --- a/plugins/inputs/docker/docker_testdata.go +++ b/plugins/inputs/docker/docker_testdata.go @@ -2,7 +2,7 @@ package docker import ( "fmt" - "io/ioutil" + "io" "strings" "time" @@ -344,7 +344,7 @@ func containerStats(s string) types.ContainerStats { }, "read": "2016-02-24T11:42:27.472459608-05:00" }`, name) - stat.Body = ioutil.NopCloser(strings.NewReader(jsonStat)) + stat.Body = io.NopCloser(strings.NewReader(jsonStat)) return stat } @@ -488,7 +488,7 @@ func containerStatsWindows() types.ContainerStats { }, "name":"/gt_test_iis", }` - stat.Body = 
ioutil.NopCloser(strings.NewReader(jsonStat)) + stat.Body = io.NopCloser(strings.NewReader(jsonStat)) return stat } diff --git a/plugins/inputs/ecs/client.go b/plugins/inputs/ecs/client.go index ac7ed2e1b09ef..b5521c5ea3f3a 100644 --- a/plugins/inputs/ecs/client.go +++ b/plugins/inputs/ecs/client.go @@ -3,7 +3,6 @@ package ecs import ( "fmt" "io" - "io/ioutil" "net/http" "net/url" "time" @@ -113,7 +112,7 @@ func (c *EcsClient) Task() (*Task, error) { if resp.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. - body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(resp.Body, 200)) return nil, fmt.Errorf("%s returned HTTP status %s: %q", c.taskURL, resp.Status, body) } @@ -137,7 +136,7 @@ func (c *EcsClient) ContainerStats() (map[string]types.StatsJSON, error) { if resp.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. 
- body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(resp.Body, 200)) return nil, fmt.Errorf("%s returned HTTP status %s: %q", c.statsURL, resp.Status, body) } diff --git a/plugins/inputs/ecs/client_test.go b/plugins/inputs/ecs/client_test.go index 2f37ca0cfa456..7e9d7e393346f 100644 --- a/plugins/inputs/ecs/client_test.go +++ b/plugins/inputs/ecs/client_test.go @@ -3,7 +3,7 @@ package ecs import ( "bytes" "errors" - "io/ioutil" + "io" "net/http" "net/url" "os" @@ -108,7 +108,7 @@ func TestEcsClient_Task(t *testing.T) { do: func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusOK, - Body: ioutil.NopCloser(rc), + Body: io.NopCloser(rc), }, nil }, }, @@ -129,7 +129,7 @@ func TestEcsClient_Task(t *testing.T) { do: func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusInternalServerError, - Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))), + Body: io.NopCloser(bytes.NewReader([]byte("foo"))), }, nil }, }, @@ -141,7 +141,7 @@ func TestEcsClient_Task(t *testing.T) { do: func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))), + Body: io.NopCloser(bytes.NewReader([]byte("foo"))), }, nil }, }, @@ -179,7 +179,7 @@ func TestEcsClient_ContainerStats(t *testing.T) { do: func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusOK, - Body: ioutil.NopCloser(rc), + Body: io.NopCloser(rc), }, nil }, }, @@ -201,7 +201,7 @@ func TestEcsClient_ContainerStats(t *testing.T) { do: func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))), + Body: io.NopCloser(bytes.NewReader([]byte("foo"))), }, nil }, }, @@ -214,7 +214,7 @@ func TestEcsClient_ContainerStats(t *testing.T) { do: func(req *http.Request) (*http.Response, 
error) { return &http.Response{ StatusCode: http.StatusInternalServerError, - Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))), + Body: io.NopCloser(bytes.NewReader([]byte("foo"))), }, nil }, }, diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go index 0bd4ce677cd9e..24142ba38c32e 100644 --- a/plugins/inputs/elasticsearch/elasticsearch.go +++ b/plugins/inputs/elasticsearch/elasticsearch.go @@ -3,7 +3,7 @@ package elasticsearch import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "regexp" "sort" @@ -702,7 +702,7 @@ func (e *Elasticsearch) getCatMaster(url string) (string, error) { // future calls. return "", fmt.Errorf("elasticsearch: Unable to retrieve master node information. API responded with status-code %d, expected %d", r.StatusCode, http.StatusOK) } - response, err := ioutil.ReadAll(r.Body) + response, err := io.ReadAll(r.Body) if err != nil { return "", err diff --git a/plugins/inputs/elasticsearch/elasticsearch_test.go b/plugins/inputs/elasticsearch/elasticsearch_test.go index 8248d063b6883..1ed61e731ce1f 100644 --- a/plugins/inputs/elasticsearch/elasticsearch_test.go +++ b/plugins/inputs/elasticsearch/elasticsearch_test.go @@ -1,7 +1,7 @@ package elasticsearch import ( - "io/ioutil" + "io" "net/http" "strings" "testing" @@ -44,7 +44,7 @@ func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) { StatusCode: t.statusCode, } res.Header.Set("Content-Type", "application/json") - res.Body = ioutil.NopCloser(strings.NewReader(t.body)) + res.Body = io.NopCloser(strings.NewReader(t.body)) return res, nil } diff --git a/plugins/inputs/execd/shim/goshim.go b/plugins/inputs/execd/shim/goshim.go index 075d2cf55ab62..cfb54e3ae0708 100644 --- a/plugins/inputs/execd/shim/goshim.go +++ b/plugins/inputs/execd/shim/goshim.go @@ -8,7 +8,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "os/signal" "strings" @@ -274,7 +273,7 @@ func LoadConfig(filePath *string) ([]telegraf.Input, 
error) { return DefaultImportedPlugins() } - b, err := ioutil.ReadFile(*filePath) + b, err := os.ReadFile(*filePath) if err != nil { return nil, err } diff --git a/plugins/inputs/file/file.go b/plugins/inputs/file/file.go index e431bc6df9f15..22af282dbde0a 100644 --- a/plugins/inputs/file/file.go +++ b/plugins/inputs/file/file.go @@ -2,7 +2,7 @@ package file import ( "fmt" - "io/ioutil" + "io" "os" "path/filepath" @@ -115,7 +115,7 @@ func (f *File) readMetric(filename string) ([]telegraf.Metric, error) { defer file.Close() r, _ := utfbom.Skip(f.decoder.Reader(file)) - fileContents, err := ioutil.ReadAll(r) + fileContents, err := io.ReadAll(r) if err != nil { return nil, fmt.Errorf("E! Error file: %v could not be read, %s", filename, err) } diff --git a/plugins/inputs/fluentd/fluentd.go b/plugins/inputs/fluentd/fluentd.go index 03f46c67ce515..9ebd1682a56b7 100644 --- a/plugins/inputs/fluentd/fluentd.go +++ b/plugins/inputs/fluentd/fluentd.go @@ -3,7 +3,7 @@ package fluentd import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "time" @@ -104,7 +104,7 @@ func (h *Fluentd) Gather(acc telegraf.Accumulator) error { defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return fmt.Errorf("unable to read the HTTP body \"%s\": %v", string(body), err) diff --git a/plugins/inputs/graylog/graylog.go b/plugins/inputs/graylog/graylog.go index 9b73991eb8227..d522f5a49dfea 100644 --- a/plugins/inputs/graylog/graylog.go +++ b/plugins/inputs/graylog/graylog.go @@ -5,7 +5,7 @@ import ( "encoding/base64" "encoding/json" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -264,7 +264,7 @@ func (h *GrayLog) sendRequest(serverURL string) (string, float64, error) { defer resp.Body.Close() responseTime := time.Since(start).Seconds() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return string(body), responseTime, err } diff --git 
a/plugins/inputs/graylog/graylog_test.go b/plugins/inputs/graylog/graylog_test.go index f8008f1d94c66..5739969e3df01 100644 --- a/plugins/inputs/graylog/graylog_test.go +++ b/plugins/inputs/graylog/graylog_test.go @@ -1,7 +1,7 @@ package graylog import ( - "io/ioutil" + "io" "net/http" "strings" "testing" @@ -115,7 +115,7 @@ func (c *mockHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) resp.StatusCode = 405 // Method not allowed } - resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody)) + resp.Body = io.NopCloser(strings.NewReader(c.responseBody)) return &resp, nil } diff --git a/plugins/inputs/http/http.go b/plugins/inputs/http/http.go index c61465a54c36f..d7a6ac1213b6f 100644 --- a/plugins/inputs/http/http.go +++ b/plugins/inputs/http/http.go @@ -4,8 +4,8 @@ import ( "context" "fmt" "io" - "io/ioutil" "net/http" + "os" "strings" "sync" @@ -180,7 +180,7 @@ func (h *HTTP) gatherURL( } if h.BearerToken != "" { - token, err := ioutil.ReadFile(h.BearerToken) + token, err := os.ReadFile(h.BearerToken) if err != nil { return err } @@ -225,7 +225,7 @@ func (h *HTTP) gatherURL( h.SuccessStatusCodes) } - b, err := ioutil.ReadAll(resp.Body) + b, err := io.ReadAll(resp.Body) if err != nil { return err } @@ -254,7 +254,7 @@ func makeRequestBodyReader(contentEncoding, body string) (io.ReadCloser, error) } return rc, nil } - return ioutil.NopCloser(reader), nil + return io.NopCloser(reader), nil } func init() { diff --git a/plugins/inputs/http/http_test.go b/plugins/inputs/http/http_test.go index 02351effc71b9..da9fed2251514 100644 --- a/plugins/inputs/http/http_test.go +++ b/plugins/inputs/http/http_test.go @@ -3,7 +3,7 @@ package http_test import ( "compress/gzip" "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "net/url" @@ -183,7 +183,7 @@ func TestBodyAndContentEncoding(t *testing.T) { URLs: []string{url}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { - body, err := ioutil.ReadAll(r.Body) + body, err 
:= io.ReadAll(r.Body) require.NoError(t, err) require.Equal(t, []byte(""), body) w.WriteHeader(http.StatusOK) @@ -197,7 +197,7 @@ func TestBodyAndContentEncoding(t *testing.T) { Body: "test", }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Equal(t, []byte("test"), body) w.WriteHeader(http.StatusOK) @@ -211,7 +211,7 @@ func TestBodyAndContentEncoding(t *testing.T) { Body: "test", }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Equal(t, []byte("test"), body) w.WriteHeader(http.StatusOK) @@ -230,7 +230,7 @@ func TestBodyAndContentEncoding(t *testing.T) { gr, err := gzip.NewReader(r.Body) require.NoError(t, err) - body, err := ioutil.ReadAll(gr) + body, err := io.ReadAll(gr) require.NoError(t, err) require.Equal(t, []byte("test"), body) w.WriteHeader(http.StatusOK) diff --git a/plugins/inputs/http_listener_v2/http_listener_v2.go b/plugins/inputs/http_listener_v2/http_listener_v2.go index 5b511de57fb54..d2a2e5f35214e 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2.go @@ -4,7 +4,7 @@ import ( "compress/gzip" "crypto/subtle" "crypto/tls" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -292,7 +292,7 @@ func (h *HTTPListenerV2) collectBody(res http.ResponseWriter, req *http.Request) } defer r.Close() maxReader := http.MaxBytesReader(res, r, int64(h.MaxBodySize)) - bytes, err := ioutil.ReadAll(maxReader) + bytes, err := io.ReadAll(maxReader) if err != nil { if err := tooLarge(res); err != nil { h.Log.Debugf("error in too-large: %v", err) @@ -302,7 +302,7 @@ func (h *HTTPListenerV2) collectBody(res http.ResponseWriter, req *http.Request) return bytes, true case "snappy": defer req.Body.Close() - bytes, err := 
ioutil.ReadAll(req.Body) + bytes, err := io.ReadAll(req.Body) if err != nil { h.Log.Debug(err.Error()) if err := badRequest(res); err != nil { @@ -322,7 +322,7 @@ func (h *HTTPListenerV2) collectBody(res http.ResponseWriter, req *http.Request) return bytes, true default: defer req.Body.Close() - bytes, err := ioutil.ReadAll(req.Body) + bytes, err := io.ReadAll(req.Body) if err != nil { h.Log.Debug(err.Error()) if err := badRequest(res); err != nil { diff --git a/plugins/inputs/http_listener_v2/http_listener_v2_test.go b/plugins/inputs/http_listener_v2/http_listener_v2_test.go index 5daaf2785ffe3..da70f443998e1 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2_test.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2_test.go @@ -4,9 +4,9 @@ import ( "bytes" "crypto/tls" "crypto/x509" - "io/ioutil" "net/http" "net/url" + "os" "runtime" "strconv" "sync" @@ -361,7 +361,7 @@ func TestWriteHTTPGzippedData(t *testing.T) { require.NoError(t, listener.Start(acc)) defer listener.Stop() - data, err := ioutil.ReadFile("./testdata/testmsgs.gz") + data, err := os.ReadFile("./testdata/testmsgs.gz") require.NoError(t, err) req, err := http.NewRequest("POST", createURL(listener, "http", "/write", ""), bytes.NewBuffer(data)) diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go index d8a4e0e1438cd..799f664d1e7b0 100644 --- a/plugins/inputs/http_response/http_response.go +++ b/plugins/inputs/http_response/http_response.go @@ -4,10 +4,10 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net" "net/http" "net/url" + "os" "regexp" "strconv" "strings" @@ -277,7 +277,7 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string] } if h.BearerToken != "" { - token, err := ioutil.ReadFile(h.BearerToken) + token, err := os.ReadFile(h.BearerToken) if err != nil { return nil, nil, err } @@ -339,7 +339,7 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string] if 
h.ResponseBodyMaxSize == 0 { h.ResponseBodyMaxSize = config.Size(defaultResponseBodyMaxSize) } - bodyBytes, err := ioutil.ReadAll(io.LimitReader(resp.Body, int64(h.ResponseBodyMaxSize)+1)) + bodyBytes, err := io.ReadAll(io.LimitReader(resp.Body, int64(h.ResponseBodyMaxSize)+1)) // Check first if the response body size exceeds the limit. if err == nil && int64(len(bodyBytes)) > int64(h.ResponseBodyMaxSize) { h.setBodyReadError("The body of the HTTP Response is too large", bodyBytes, fields, tags) diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go index 40917bba1bc39..5d109d0a35439 100644 --- a/plugins/inputs/http_response/http_response_test.go +++ b/plugins/inputs/http_response/http_response_test.go @@ -8,7 +8,7 @@ package http_response import ( "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/http/httptest" @@ -123,7 +123,7 @@ func setUpTestMux() http.Handler { fmt.Fprintf(w, "used post correctly!") }) mux.HandleFunc("/musthaveabody", func(w http.ResponseWriter, req *http.Request) { - body, err := ioutil.ReadAll(req.Body) + body, err := io.ReadAll(req.Body) //nolint:errcheck,revive req.Body.Close() if err != nil { diff --git a/plugins/inputs/httpjson/httpjson.go b/plugins/inputs/httpjson/httpjson.go index f5d97b90989c0..10a4cb0c17643 100644 --- a/plugins/inputs/httpjson/httpjson.go +++ b/plugins/inputs/httpjson/httpjson.go @@ -3,7 +3,7 @@ package httpjson import ( "bytes" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "strings" @@ -263,7 +263,7 @@ func (h *HTTPJSON) sendRequest(serverURL string) (string, float64, error) { defer resp.Body.Close() responseTime := time.Since(start).Seconds() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return string(body), responseTime, err } diff --git a/plugins/inputs/httpjson/httpjson_test.go b/plugins/inputs/httpjson/httpjson_test.go index 9f6292cba722d..b203238a94037 100644 --- 
a/plugins/inputs/httpjson/httpjson_test.go +++ b/plugins/inputs/httpjson/httpjson_test.go @@ -2,7 +2,7 @@ package httpjson import ( "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "strings" @@ -143,7 +143,7 @@ func (c *mockHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) resp.StatusCode = 405 // Method not allowed } - resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody)) + resp.Body = io.NopCloser(strings.NewReader(c.responseBody)) return &resp, nil } @@ -377,7 +377,7 @@ func TestHttpJsonPOST(t *testing.T) { "api_key": "mykey", } ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) assert.NoError(t, err) assert.Equal(t, "api_key=mykey", string(body)) w.WriteHeader(http.StatusOK) diff --git a/plugins/inputs/influxdb_listener/influxdb_listener_test.go b/plugins/inputs/influxdb_listener/influxdb_listener_test.go index 8a082a855a7f8..6b88907f95801 100644 --- a/plugins/inputs/influxdb_listener/influxdb_listener_test.go +++ b/plugins/inputs/influxdb_listener/influxdb_listener_test.go @@ -4,9 +4,9 @@ import ( "bytes" "crypto/tls" "crypto/x509" - "io/ioutil" "net/http" "net/url" + "os" "runtime" "strconv" "sync" @@ -406,7 +406,7 @@ func TestWriteGzippedData(t *testing.T) { require.NoError(t, listener.Start(acc)) defer listener.Stop() - data, err := ioutil.ReadFile("./testdata/testmsgs.gz") + data, err := os.ReadFile("./testdata/testmsgs.gz") require.NoError(t, err) req, err := http.NewRequest("POST", createURL(listener, "http", "/write", ""), bytes.NewBuffer(data)) diff --git a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go index 64907d12a52dc..4df2f7dc86a5e 100644 --- a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go +++ b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go @@ -6,7 +6,7 @@ import ( "crypto/tls" "encoding/json" "fmt" - 
"io/ioutil" + "io" "net" "net/http" "time" @@ -256,7 +256,7 @@ func (h *InfluxDBV2Listener) handleWrite() http.HandlerFunc { var readErr error var bytes []byte //body = http.MaxBytesReader(res, req.Body, 1000000) //p.MaxBodySize.Size) - bytes, readErr = ioutil.ReadAll(body) + bytes, readErr = io.ReadAll(body) if readErr != nil { h.Log.Debugf("Error parsing the request body: %v", readErr.Error()) if err := badRequest(res, InternalError, readErr.Error()); err != nil { diff --git a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go index be99c93f51a8a..055dfc395ba7b 100644 --- a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go +++ b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go @@ -5,9 +5,10 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" + "io" "net/http" "net/url" + "os" "runtime" "strconv" "sync" @@ -363,7 +364,7 @@ func TestWriteGzippedData(t *testing.T) { require.NoError(t, listener.Start(acc)) defer listener.Stop() - data, err := ioutil.ReadFile("./testdata/testmsgs.gz") + data, err := os.ReadFile("./testdata/testmsgs.gz") require.NoError(t, err) req, err := http.NewRequest("POST", createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), bytes.NewBuffer(data)) @@ -485,7 +486,7 @@ func TestReady(t *testing.T) { resp, err := http.Get(createURL(listener, "http", "/api/v2/ready", "")) require.NoError(t, err) require.Equal(t, "application/json", resp.Header["Content-Type"][0]) - bodyBytes, err := ioutil.ReadAll(resp.Body) + bodyBytes, err := io.ReadAll(resp.Body) require.NoError(t, err) require.Contains(t, string(bodyBytes), "\"status\":\"ready\"") require.NoError(t, resp.Body.Close()) diff --git a/plugins/inputs/intel_powerstat/file.go b/plugins/inputs/intel_powerstat/file.go index a07dd57e16a57..c69dea89f4e26 100644 --- a/plugins/inputs/intel_powerstat/file.go +++ b/plugins/inputs/intel_powerstat/file.go @@ -8,7 +8,6 @@ import ( 
"encoding/binary" "fmt" "io" - "io/ioutil" "os" "path/filepath" "regexp" @@ -107,7 +106,7 @@ func (fs *fileServiceImpl) getStringsMatchingPatternOnPath(path string) ([]strin // readFile reads file on path and return string content. func (fs *fileServiceImpl) readFile(path string) ([]byte, error) { - out, err := ioutil.ReadFile(path) + out, err := os.ReadFile(path) if err != nil { return make([]byte, 0), err } @@ -116,7 +115,7 @@ func (fs *fileServiceImpl) readFile(path string) ([]byte, error) { // readFileToFloat64 reads file on path and tries to parse content to float64. func (fs *fileServiceImpl) readFileToFloat64(reader io.Reader) (float64, int64, error) { - read, err := ioutil.ReadAll(reader) + read, err := io.ReadAll(reader) if err != nil { return 0, 0, err } diff --git a/plugins/inputs/jolokia/jolokia.go b/plugins/inputs/jolokia/jolokia.go index 9e4cac511683b..af5e3de283800 100644 --- a/plugins/inputs/jolokia/jolokia.go +++ b/plugins/inputs/jolokia/jolokia.go @@ -4,7 +4,7 @@ import ( "bytes" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "time" @@ -153,7 +153,7 @@ func (j *Jolokia) doRequest(req *http.Request) ([]map[string]interface{}, error) } // read body - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } diff --git a/plugins/inputs/jolokia/jolokia_test.go b/plugins/inputs/jolokia/jolokia_test.go index 5c1bc50aa2ae7..e91e9a1087fda 100644 --- a/plugins/inputs/jolokia/jolokia_test.go +++ b/plugins/inputs/jolokia/jolokia_test.go @@ -2,7 +2,7 @@ package jolokia import ( _ "fmt" - "io/ioutil" + "io" "net/http" "strings" "testing" @@ -116,7 +116,7 @@ type jolokiaClientStub struct { func (c jolokiaClientStub) MakeRequest(_ *http.Request) (*http.Response, error) { resp := http.Response{} resp.StatusCode = c.statusCode - resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody)) + resp.Body = io.NopCloser(strings.NewReader(c.responseBody)) return &resp, nil } diff --git 
a/plugins/inputs/jolokia2/client.go b/plugins/inputs/jolokia2/client.go index 41ebd4f8af872..789450e3a1016 100644 --- a/plugins/inputs/jolokia2/client.go +++ b/plugins/inputs/jolokia2/client.go @@ -4,7 +4,7 @@ import ( "bytes" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "path" @@ -149,7 +149,7 @@ func (c *Client) read(requests []ReadRequest) ([]ReadResponse, error) { c.URL, resp.StatusCode, http.StatusText(resp.StatusCode), http.StatusOK, http.StatusText(http.StatusOK)) } - responseBody, err := ioutil.ReadAll(resp.Body) + responseBody, err := io.ReadAll(resp.Body) if err != nil { return nil, err } diff --git a/plugins/inputs/jolokia2/client_test.go b/plugins/inputs/jolokia2/client_test.go index 7ec65d27a0ebf..a1bd5f4a2e141 100644 --- a/plugins/inputs/jolokia2/client_test.go +++ b/plugins/inputs/jolokia2/client_test.go @@ -3,7 +3,7 @@ package jolokia2 import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "testing" @@ -20,7 +20,7 @@ func TestJolokia2_ClientAuthRequest(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { username, password, _ = r.BasicAuth() - body, _ := ioutil.ReadAll(r.Body) + body, _ := io.ReadAll(r.Body) require.NoError(t, json.Unmarshal(body, &requests)) w.WriteHeader(http.StatusOK) @@ -56,7 +56,7 @@ func TestJolokia2_ClientProxyAuthRequest(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { username, password, _ = r.BasicAuth() - body, _ := ioutil.ReadAll(r.Body) + body, _ := io.ReadAll(r.Body) require.NoError(t, json.Unmarshal(body, &requests)) w.WriteHeader(http.StatusOK) _, err := fmt.Fprintf(w, "[]") diff --git a/plugins/inputs/kernel/kernel.go b/plugins/inputs/kernel/kernel.go index 22311e9a0f12d..c16c68bf44bd1 100644 --- a/plugins/inputs/kernel/kernel.go +++ b/plugins/inputs/kernel/kernel.go @@ -6,7 +6,6 @@ package kernel import ( "bytes" "fmt" - "io/ioutil" "os" "strconv" 
"strings" @@ -41,7 +40,7 @@ func (k *Kernel) Gather(acc telegraf.Accumulator) error { return err } - entropyData, err := ioutil.ReadFile(k.entropyStatFile) + entropyData, err := os.ReadFile(k.entropyStatFile) if err != nil { return err } @@ -109,7 +108,7 @@ func (k *Kernel) getProcStat() ([]byte, error) { return nil, err } - data, err := ioutil.ReadFile(k.statFile) + data, err := os.ReadFile(k.statFile) if err != nil { return nil, err } diff --git a/plugins/inputs/kernel/kernel_test.go b/plugins/inputs/kernel/kernel_test.go index 462624c2eb40d..f174017fad7b9 100644 --- a/plugins/inputs/kernel/kernel_test.go +++ b/plugins/inputs/kernel/kernel_test.go @@ -4,7 +4,6 @@ package kernel import ( - "io/ioutil" "os" "testing" @@ -169,7 +168,7 @@ const entropyStatFilePartial = `1024` const entropyStatFileInvalid = `` func makeFakeStatFile(t *testing.T, content []byte) string { - tmpfile, err := ioutil.TempFile("", "kernel_test") + tmpfile, err := os.CreateTemp("", "kernel_test") require.NoError(t, err) _, err = tmpfile.Write(content) diff --git a/plugins/inputs/kernel_vmstat/kernel_vmstat.go b/plugins/inputs/kernel_vmstat/kernel_vmstat.go index 2019e0cbfddb3..95a7a5e32f1e0 100644 --- a/plugins/inputs/kernel_vmstat/kernel_vmstat.go +++ b/plugins/inputs/kernel_vmstat/kernel_vmstat.go @@ -6,7 +6,6 @@ package kernel_vmstat import ( "bytes" "fmt" - "io/ioutil" "os" "strconv" @@ -61,7 +60,7 @@ func (k *KernelVmstat) getProcVmstat() ([]byte, error) { return nil, err } - data, err := ioutil.ReadFile(k.statFile) + data, err := os.ReadFile(k.statFile) if err != nil { return nil, err } diff --git a/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go b/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go index 6bbb9d7b5b12f..6590e3febd19c 100644 --- a/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go +++ b/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go @@ -4,7 +4,6 @@ package kernel_vmstat import ( - "io/ioutil" "os" "testing" @@ -300,7 +299,7 @@ thp_collapse_alloc_failed 102214 
thp_split abcd` func makeFakeVMStatFile(t *testing.T, content []byte) string { - tmpfile, err := ioutil.TempFile("", "kernel_vmstat_test") + tmpfile, err := os.CreateTemp("", "kernel_vmstat_test") require.NoError(t, err) _, err = tmpfile.Write(content) diff --git a/plugins/inputs/kibana/kibana.go b/plugins/inputs/kibana/kibana.go index c94438eb38d4d..55ffa1df845f9 100644 --- a/plugins/inputs/kibana/kibana.go +++ b/plugins/inputs/kibana/kibana.go @@ -4,7 +4,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "strconv" "strings" @@ -253,7 +252,7 @@ func (k *Kibana) gatherJSONData(url string, v interface{}) (host string, err err if response.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. - body, _ := ioutil.ReadAll(io.LimitReader(response.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(response.Body, 200)) return request.Host, fmt.Errorf("%s returned HTTP status %s: %q", url, response.Status, body) } diff --git a/plugins/inputs/kibana/kibana_test.go b/plugins/inputs/kibana/kibana_test.go index 3dfed9edfa9a2..565d9b1c79416 100644 --- a/plugins/inputs/kibana/kibana_test.go +++ b/plugins/inputs/kibana/kibana_test.go @@ -1,7 +1,7 @@ package kibana import ( - "io/ioutil" + "io" "net/http" "strings" "testing" @@ -46,7 +46,7 @@ func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) { StatusCode: t.statusCode, } res.Header.Set("Content-Type", "application/json") - res.Body = ioutil.NopCloser(strings.NewReader(t.body)) + res.Body = io.NopCloser(strings.NewReader(t.body)) return res, nil } diff --git a/plugins/inputs/kinesis_consumer/kinesis_consumer.go b/plugins/inputs/kinesis_consumer/kinesis_consumer.go index 7acd3202c012b..005ccdc43aab2 100644 --- a/plugins/inputs/kinesis_consumer/kinesis_consumer.go +++ b/plugins/inputs/kinesis_consumer/kinesis_consumer.go @@ -6,7 +6,7 @@ import ( "compress/zlib" "context" "fmt" - "io/ioutil" + "io" "math/big" "strings" "sync" @@ 
-349,7 +349,7 @@ func processGzip(data []byte) ([]byte, error) { return nil, err } defer zipData.Close() - return ioutil.ReadAll(zipData) + return io.ReadAll(zipData) } func processZlib(data []byte) ([]byte, error) { @@ -358,7 +358,7 @@ func processZlib(data []byte) ([]byte, error) { return nil, err } defer zlibData.Close() - return ioutil.ReadAll(zlibData) + return io.ReadAll(zlibData) } func processNoOp(data []byte) ([]byte, error) { diff --git a/plugins/inputs/kube_inventory/kube_state.go b/plugins/inputs/kube_inventory/kube_state.go index bcfae4ce8f52f..24db993dd39bb 100644 --- a/plugins/inputs/kube_inventory/kube_state.go +++ b/plugins/inputs/kube_inventory/kube_state.go @@ -3,8 +3,8 @@ package kube_inventory import ( "context" "fmt" - "io/ioutil" "log" + "os" "strconv" "strings" "sync" @@ -101,7 +101,7 @@ func (ki *KubernetesInventory) Init() error { } if ki.BearerToken != "" { - token, err := ioutil.ReadFile(ki.BearerToken) + token, err := os.ReadFile(ki.BearerToken) if err != nil { return err } diff --git a/plugins/inputs/kubernetes/kubernetes.go b/plugins/inputs/kubernetes/kubernetes.go index ab1cf4bfe4afc..8ca636d480cc2 100644 --- a/plugins/inputs/kubernetes/kubernetes.go +++ b/plugins/inputs/kubernetes/kubernetes.go @@ -3,8 +3,8 @@ package kubernetes import ( "encoding/json" "fmt" - "io/ioutil" "net/http" + "os" "strings" "time" @@ -93,7 +93,7 @@ func (k *Kubernetes) Init() error { } if k.BearerToken != "" { - token, err := ioutil.ReadFile(k.BearerToken) + token, err := os.ReadFile(k.BearerToken) if err != nil { return err } diff --git a/plugins/inputs/leofs/leofs_test.go b/plugins/inputs/leofs/leofs_test.go index 513d2f5ed7de7..1e33ddc4c3d38 100644 --- a/plugins/inputs/leofs/leofs_test.go +++ b/plugins/inputs/leofs/leofs_test.go @@ -1,7 +1,6 @@ package leofs import ( - "io/ioutil" "os" "os/exec" "runtime" @@ -132,7 +131,7 @@ func testMain(t *testing.T, code string, endpoint string, serverType ServerType) // Build the fake snmpwalk for test src := 
os.TempDir() + "/test.go" - require.NoError(t, ioutil.WriteFile(src, []byte(code), 0600)) + require.NoError(t, os.WriteFile(src, []byte(code), 0600)) defer os.Remove(src) require.NoError(t, exec.Command("go", "build", "-o", executable, src).Run()) diff --git a/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go index 55cb22292105a..19848b6db0e37 100644 --- a/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go +++ b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go @@ -3,7 +3,6 @@ package linux_sysctl_fs import ( "bytes" "errors" - "io/ioutil" "os" "strconv" @@ -29,7 +28,7 @@ func (sfs SysctlFS) SampleConfig() string { } func (sfs *SysctlFS) gatherList(file string, fields map[string]interface{}, fieldNames ...string) error { - bs, err := ioutil.ReadFile(sfs.path + "/" + file) + bs, err := os.ReadFile(sfs.path + "/" + file) if err != nil { // Ignore non-existing entries if errors.Is(err, os.ErrNotExist) { @@ -58,7 +57,7 @@ func (sfs *SysctlFS) gatherList(file string, fields map[string]interface{}, fiel } func (sfs *SysctlFS) gatherOne(name string, fields map[string]interface{}) error { - bs, err := ioutil.ReadFile(sfs.path + "/" + name) + bs, err := os.ReadFile(sfs.path + "/" + name) if err != nil { // Ignore non-existing entries if errors.Is(err, os.ErrNotExist) { diff --git a/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs_test.go b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs_test.go index 78011e288b962..8b76b266b1c9e 100644 --- a/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs_test.go +++ b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs_test.go @@ -1,7 +1,6 @@ package linux_sysctl_fs import ( - "io/ioutil" "os" "testing" @@ -10,16 +9,16 @@ import ( ) func TestSysctlFSGather(t *testing.T) { - td, err := ioutil.TempDir("", "") + td, err := os.MkdirTemp("", "") require.NoError(t, err) defer os.RemoveAll(td) - require.NoError(t, ioutil.WriteFile(td+"/aio-nr", []byte("100\n"), 0644)) - require.NoError(t, 
ioutil.WriteFile(td+"/aio-max-nr", []byte("101\n"), 0644)) - require.NoError(t, ioutil.WriteFile(td+"/super-nr", []byte("102\n"), 0644)) - require.NoError(t, ioutil.WriteFile(td+"/super-max", []byte("103\n"), 0644)) - require.NoError(t, ioutil.WriteFile(td+"/file-nr", []byte("104\t0\t106\n"), 0644)) - require.NoError(t, ioutil.WriteFile(td+"/inode-state", []byte("107\t108\t109\t0\t0\t0\t0\n"), 0644)) + require.NoError(t, os.WriteFile(td+"/aio-nr", []byte("100\n"), 0644)) + require.NoError(t, os.WriteFile(td+"/aio-max-nr", []byte("101\n"), 0644)) + require.NoError(t, os.WriteFile(td+"/super-nr", []byte("102\n"), 0644)) + require.NoError(t, os.WriteFile(td+"/super-max", []byte("103\n"), 0644)) + require.NoError(t, os.WriteFile(td+"/file-nr", []byte("104\t0\t106\n"), 0644)) + require.NoError(t, os.WriteFile(td+"/inode-state", []byte("107\t108\t109\t0\t0\t0\t0\n"), 0644)) sfs := &SysctlFS{ path: td, diff --git a/plugins/inputs/logparser/logparser_test.go b/plugins/inputs/logparser/logparser_test.go index 9bf4f125ae4f6..3100c615cd4e4 100644 --- a/plugins/inputs/logparser/logparser_test.go +++ b/plugins/inputs/logparser/logparser_test.go @@ -1,7 +1,6 @@ package logparser import ( - "io/ioutil" "os" "path/filepath" "testing" @@ -111,7 +110,7 @@ func TestGrokParseLogFiles(t *testing.T) { } func TestGrokParseLogFilesAppearLater(t *testing.T) { - emptydir, err := ioutil.TempDir("", "TestGrokParseLogFilesAppearLater") + emptydir, err := os.MkdirTemp("", "TestGrokParseLogFilesAppearLater") defer os.RemoveAll(emptydir) assert.NoError(t, err) @@ -131,10 +130,10 @@ func TestGrokParseLogFilesAppearLater(t *testing.T) { assert.Equal(t, acc.NFields(), 0) - input, err := ioutil.ReadFile(filepath.Join(testdataDir, "test_a.log")) + input, err := os.ReadFile(filepath.Join(testdataDir, "test_a.log")) assert.NoError(t, err) - err = ioutil.WriteFile(filepath.Join(emptydir, "test_a.log"), input, 0644) + err = os.WriteFile(filepath.Join(emptydir, "test_a.log"), input, 0644) assert.NoError(t, 
err) assert.NoError(t, acc.GatherError(logparser.Gather)) diff --git a/plugins/inputs/logstash/logstash.go b/plugins/inputs/logstash/logstash.go index 10a3e7b6b8dd0..6fcaadabcd244 100644 --- a/plugins/inputs/logstash/logstash.go +++ b/plugins/inputs/logstash/logstash.go @@ -4,7 +4,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strings" @@ -206,7 +205,7 @@ func (logstash *Logstash) gatherJSONData(url string, value interface{}) error { defer response.Body.Close() if response.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. - body, _ := ioutil.ReadAll(io.LimitReader(response.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(response.Body, 200)) return fmt.Errorf("%s returned HTTP status %s: %q", url, response.Status, body) } diff --git a/plugins/inputs/lustre2/lustre2.go b/plugins/inputs/lustre2/lustre2.go index 00aa288b316a8..abd5ce87c6bbb 100644 --- a/plugins/inputs/lustre2/lustre2.go +++ b/plugins/inputs/lustre2/lustre2.go @@ -8,7 +8,7 @@ package lustre2 import ( - "io/ioutil" + "os" "path/filepath" "regexp" "strconv" @@ -374,7 +374,7 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping) e name := path[len(path)-2] //lines, err := internal.ReadLines(file) - wholeFile, err := ioutil.ReadFile(file) + wholeFile, err := os.ReadFile(file) if err != nil { return err } diff --git a/plugins/inputs/lustre2/lustre2_test.go b/plugins/inputs/lustre2/lustre2_test.go index 52c7e87f08fc6..7fd3fd91f469e 100644 --- a/plugins/inputs/lustre2/lustre2_test.go +++ b/plugins/inputs/lustre2/lustre2_test.go @@ -4,7 +4,6 @@ package lustre2 import ( - "io/ioutil" "os" "testing" @@ -149,13 +148,13 @@ func TestLustre2GeneratesMetrics(t *testing.T) { err = os.MkdirAll(obddir+"/"+ostName, 0755) require.NoError(t, err) - err = ioutil.WriteFile(mdtdir+"/"+ostName+"/md_stats", []byte(mdtProcContents), 0644) + err = os.WriteFile(mdtdir+"/"+ostName+"/md_stats", 
[]byte(mdtProcContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(osddir+"/"+ostName+"/stats", []byte(osdldiskfsProcContents), 0644) + err = os.WriteFile(osddir+"/"+ostName+"/stats", []byte(osdldiskfsProcContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(obddir+"/"+ostName+"/stats", []byte(obdfilterProcContents), 0644) + err = os.WriteFile(obddir+"/"+ostName+"/stats", []byte(obdfilterProcContents), 0644) require.NoError(t, err) // Begin by testing standard Lustre stats @@ -218,10 +217,10 @@ func TestLustre2GeneratesJobstatsMetrics(t *testing.T) { err = os.MkdirAll(obddir+"/"+ostName, 0755) require.NoError(t, err) - err = ioutil.WriteFile(mdtdir+"/"+ostName+"/job_stats", []byte(mdtJobStatsContents), 0644) + err = os.WriteFile(mdtdir+"/"+ostName+"/job_stats", []byte(mdtJobStatsContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(obddir+"/"+ostName+"/job_stats", []byte(obdfilterJobStatsContents), 0644) + err = os.WriteFile(obddir+"/"+ostName+"/job_stats", []byte(obdfilterJobStatsContents), 0644) require.NoError(t, err) // Test Lustre Jobstats diff --git a/plugins/inputs/mailchimp/chimp_api.go b/plugins/inputs/mailchimp/chimp_api.go index 259e64a0e3104..2f6cecdb9e0da 100644 --- a/plugins/inputs/mailchimp/chimp_api.go +++ b/plugins/inputs/mailchimp/chimp_api.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "log" "net/http" "net/url" @@ -148,11 +147,11 @@ func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) { if resp.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. 
- body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(resp.Body, 200)) return nil, fmt.Errorf("%s returned HTTP status %s: %q", api.url.String(), resp.Status, body) } - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } diff --git a/plugins/inputs/mdstat/mdstat.go b/plugins/inputs/mdstat/mdstat.go index 81e3f36e7c767..3f6fee7d086ca 100644 --- a/plugins/inputs/mdstat/mdstat.go +++ b/plugins/inputs/mdstat/mdstat.go @@ -20,7 +20,6 @@ package mdstat import ( "fmt" - "io/ioutil" "os" "regexp" "sort" @@ -291,7 +290,7 @@ func (k *MdstatConf) getProcMdstat() ([]byte, error) { return nil, err } - data, err := ioutil.ReadFile(mdStatFile) + data, err := os.ReadFile(mdStatFile) if err != nil { return nil, err } diff --git a/plugins/inputs/mdstat/mdstat_test.go b/plugins/inputs/mdstat/mdstat_test.go index fe6041abec353..070b7ddd234f5 100644 --- a/plugins/inputs/mdstat/mdstat_test.go +++ b/plugins/inputs/mdstat/mdstat_test.go @@ -4,7 +4,6 @@ package mdstat import ( - "io/ioutil" "os" "testing" @@ -134,7 +133,7 @@ unused devices: ` func makeFakeMDStatFile(content []byte) (filename string) { - fileobj, err := ioutil.TempFile("", "mdstat") + fileobj, err := os.CreateTemp("", "mdstat") if err != nil { panic(err) } diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index acb79ce5724e5..68203c9d480cb 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -3,7 +3,7 @@ package mesos import ( "encoding/json" "errors" - "io/ioutil" + "io" "log" "net" "net/http" @@ -558,7 +558,7 @@ func (m *Mesos) gatherMainMetrics(u *url.URL, role Role, acc telegraf.Accumulato return err } - data, err := ioutil.ReadAll(resp.Body) + data, err := io.ReadAll(resp.Body) // Ignore the returned error to not shadow the initial one //nolint:errcheck,revive resp.Body.Close() diff --git a/plugins/inputs/multifile/multifile.go 
b/plugins/inputs/multifile/multifile.go index 838b1dd764d2f..65c2ac4e4b783 100644 --- a/plugins/inputs/multifile/multifile.go +++ b/plugins/inputs/multifile/multifile.go @@ -3,8 +3,8 @@ package multifile import ( "bytes" "fmt" - "io/ioutil" "math" + "os" "path" "strconv" "time" @@ -84,7 +84,7 @@ func (m *MultiFile) Gather(acc telegraf.Accumulator) error { tags := make(map[string]string) for _, file := range m.Files { - fileContents, err := ioutil.ReadFile(file.Name) + fileContents, err := os.ReadFile(file.Name) if err != nil { if m.FailEarly { diff --git a/plugins/inputs/nats/nats.go b/plugins/inputs/nats/nats.go index c2adab29b324d..7144355096b4e 100644 --- a/plugins/inputs/nats/nats.go +++ b/plugins/inputs/nats/nats.go @@ -5,7 +5,7 @@ package nats import ( "encoding/json" - "io/ioutil" + "io" "net/http" "net/url" "path" @@ -56,7 +56,7 @@ func (n *Nats) Gather(acc telegraf.Accumulator) error { } defer resp.Body.Close() - bytes, err := ioutil.ReadAll(resp.Body) + bytes, err := io.ReadAll(resp.Body) if err != nil { return err } diff --git a/plugins/inputs/neptune_apex/neptune_apex.go b/plugins/inputs/neptune_apex/neptune_apex.go index dad4c8e5857f6..c2bb05384d7c8 100644 --- a/plugins/inputs/neptune_apex/neptune_apex.go +++ b/plugins/inputs/neptune_apex/neptune_apex.go @@ -5,7 +5,7 @@ package neptuneapex import ( "encoding/xml" "fmt" - "io/ioutil" + "io" "math" "net/http" "strconv" @@ -276,7 +276,7 @@ func (n *NeptuneApex) sendRequest(server string) ([]byte, error) { url, resp.StatusCode, http.StatusText(resp.StatusCode), http.StatusOK, http.StatusText(http.StatusOK)) } - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, fmt.Errorf("unable to read output from %q: %v", url, err) } diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go index 7e1e753c5ff76..5cd7e76aec439 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go +++ 
b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go @@ -4,7 +4,7 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -70,7 +70,7 @@ func (n *NginxPlusAPI) gatherURL(addr *url.URL, path string) ([]byte, error) { contentType := strings.Split(resp.Header.Get("Content-Type"), ";")[0] switch contentType { case "application/json": - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } diff --git a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go index fb40643409056..42e0cab62d53e 100644 --- a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go +++ b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go @@ -4,7 +4,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strconv" @@ -153,7 +152,7 @@ func (check *NginxUpstreamCheck) gatherJSONData(url string, value interface{}) e defer response.Body.Close() if response.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. 
- body, _ := ioutil.ReadAll(io.LimitReader(response.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(response.Body, 200)) return fmt.Errorf("%s returned HTTP status %s: %q", url, response.Status, body) } diff --git a/plugins/inputs/nsq/nsq.go b/plugins/inputs/nsq/nsq.go index 681c2f6e7f460..58f60192b96d0 100644 --- a/plugins/inputs/nsq/nsq.go +++ b/plugins/inputs/nsq/nsq.go @@ -25,7 +25,7 @@ package nsq import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "strconv" @@ -131,7 +131,7 @@ func (n *NSQ) gatherEndpoint(e string, acc telegraf.Accumulator) error { return fmt.Errorf("%s returned HTTP status %s", u.String(), r.Status) } - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) if err != nil { return fmt.Errorf(`error reading body: %s`, err) } diff --git a/plugins/inputs/nstat/nstat.go b/plugins/inputs/nstat/nstat.go index 5bc2bc85a3136..4408b8f728579 100644 --- a/plugins/inputs/nstat/nstat.go +++ b/plugins/inputs/nstat/nstat.go @@ -2,7 +2,6 @@ package nstat import ( "bytes" - "io/ioutil" "os" "strconv" @@ -62,7 +61,7 @@ func (ns *Nstat) Gather(acc telegraf.Accumulator) error { // load paths, get from env if config values are empty ns.loadPaths() - netstat, err := ioutil.ReadFile(ns.ProcNetNetstat) + netstat, err := os.ReadFile(ns.ProcNetNetstat) if err != nil { return err } @@ -71,14 +70,14 @@ func (ns *Nstat) Gather(acc telegraf.Accumulator) error { ns.gatherNetstat(netstat, acc) // collect SNMP data - snmp, err := ioutil.ReadFile(ns.ProcNetSNMP) + snmp, err := os.ReadFile(ns.ProcNetSNMP) if err != nil { return err } ns.gatherSNMP(snmp, acc) // collect SNMP6 data, if SNMP6 directory exists (IPv6 enabled) - snmp6, err := ioutil.ReadFile(ns.ProcNetSNMP6) + snmp6, err := os.ReadFile(ns.ProcNetSNMP6) if err == nil { ns.gatherSNMP6(snmp6, acc) } else if !os.IsNotExist(err) { diff --git a/plugins/inputs/nvidia_smi/nvidia_smi_test.go b/plugins/inputs/nvidia_smi/nvidia_smi_test.go index ea5887ae10a5d..3c0b14d6e4559 100644 --- 
a/plugins/inputs/nvidia_smi/nvidia_smi_test.go +++ b/plugins/inputs/nvidia_smi/nvidia_smi_test.go @@ -1,7 +1,7 @@ package nvidia_smi import ( - "io/ioutil" + "os" "path/filepath" "testing" "time" @@ -139,7 +139,7 @@ func TestGatherValidXML(t *testing.T) { t.Run(tt.name, func(t *testing.T) { var acc testutil.Accumulator - octets, err := ioutil.ReadFile(filepath.Join("testdata", tt.filename)) + octets, err := os.ReadFile(filepath.Join("testdata", tt.filename)) require.NoError(t, err) err = gatherNvidiaSMI(octets, &acc) diff --git a/plugins/inputs/opcua/opcua_util.go b/plugins/inputs/opcua/opcua_util.go index bb7ca56200954..e1304fa304fc6 100644 --- a/plugins/inputs/opcua/opcua_util.go +++ b/plugins/inputs/opcua/opcua_util.go @@ -9,7 +9,6 @@ import ( "crypto/x509/pkix" "encoding/pem" "fmt" - "io/ioutil" "log" "math/big" "net" @@ -27,7 +26,7 @@ import ( // SELF SIGNED CERT FUNCTIONS func newTempDir() (string, error) { - dir, err := ioutil.TempDir("", "ssc") + dir, err := os.MkdirTemp("", "ssc") return dir, err } diff --git a/plugins/inputs/passenger/passenger_test.go b/plugins/inputs/passenger/passenger_test.go index dbee336ba1040..ecbeeb532fd1e 100644 --- a/plugins/inputs/passenger/passenger_test.go +++ b/plugins/inputs/passenger/passenger_test.go @@ -2,7 +2,6 @@ package passenger import ( "fmt" - "io/ioutil" "os" "path/filepath" "runtime" @@ -28,7 +27,7 @@ func fakePassengerStatus(stat string) (string, error) { } tempFilePath := filepath.Join(os.TempDir(), "passenger-status"+fileExtension) - if err := ioutil.WriteFile(tempFilePath, []byte(content), 0700); err != nil { + if err := os.WriteFile(tempFilePath, []byte(content), 0700); err != nil { return "", err } diff --git a/plugins/inputs/phpfpm/child.go b/plugins/inputs/phpfpm/child.go index 9ac7e60715856..b6a6f956d3bf0 100644 --- a/plugins/inputs/phpfpm/child.go +++ b/plugins/inputs/phpfpm/child.go @@ -10,7 +10,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net" "net/http" "net/http/cgi" @@ -161,7 +160,7 @@ func (c 
*child) serve() { var errCloseConn = errors.New("fcgi: connection should be closed") -var emptyBody = ioutil.NopCloser(strings.NewReader("")) +var emptyBody = io.NopCloser(strings.NewReader("")) // ErrRequestAborted is returned by Read when a handler attempts to read the // body of a request that has been aborted by the web server. @@ -295,7 +294,7 @@ func (c *child) serveRequest(req *request, body io.ReadCloser) { // can properly cut off the client sending all the data. // For now just bound it a little and //nolint:errcheck,revive - io.CopyN(ioutil.Discard, body, 100<<20) + io.CopyN(io.Discard, body, 100<<20) //nolint:errcheck,revive body.Close() diff --git a/plugins/inputs/phpfpm/fcgi_test.go b/plugins/inputs/phpfpm/fcgi_test.go index a7234225806cc..7211c0c3971e1 100644 --- a/plugins/inputs/phpfpm/fcgi_test.go +++ b/plugins/inputs/phpfpm/fcgi_test.go @@ -8,7 +8,6 @@ import ( "bytes" "errors" "io" - "io/ioutil" "net/http" "testing" ) @@ -242,7 +241,7 @@ func TestChildServeCleansUp(t *testing.T) { r *http.Request, ) { // block on reading body of request - _, err := io.Copy(ioutil.Discard, r.Body) + _, err := io.Copy(io.Discard, r.Body) if err != tt.err { t.Errorf("Expected %#v, got %#v", tt.err, err) } @@ -274,7 +273,7 @@ func TestMalformedParams(_ *testing.T) { // end of params 1, 4, 0, 1, 0, 0, 0, 0, } - rw := rwNopCloser{bytes.NewReader(input), ioutil.Discard} + rw := rwNopCloser{bytes.NewReader(input), io.Discard} c := newChild(rw, http.DefaultServeMux) c.serve() } diff --git a/plugins/inputs/postfix/postfix_test.go b/plugins/inputs/postfix/postfix_test.go index 782a0c78c95b9..6ab6556a0cf07 100644 --- a/plugins/inputs/postfix/postfix_test.go +++ b/plugins/inputs/postfix/postfix_test.go @@ -4,7 +4,6 @@ package postfix import ( - "io/ioutil" "os" "path/filepath" "testing" @@ -15,7 +14,7 @@ import ( ) func TestGather(t *testing.T) { - td, err := ioutil.TempDir("", "") + td, err := os.MkdirTemp("", "") require.NoError(t, err) defer os.RemoveAll(td) @@ -23,12 
+22,12 @@ func TestGather(t *testing.T) { require.NoError(t, os.MkdirAll(filepath.FromSlash(td+"/"+q), 0755)) } - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/active/01"), []byte("abc"), 0644)) - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/active/02"), []byte("defg"), 0644)) - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/hold/01"), []byte("abc"), 0644)) - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/incoming/01"), []byte("abcd"), 0644)) - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/deferred/0/0/01"), []byte("abc"), 0644)) - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/deferred/F/F/F1"), []byte("abc"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/active/01"), []byte("abc"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/active/02"), []byte("defg"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/hold/01"), []byte("abc"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/incoming/01"), []byte("abcd"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/deferred/0/0/01"), []byte("abc"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/deferred/F/F/F1"), []byte("abc"), 0644)) p := Postfix{ QueueDirectory: td, diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go index 8311064b1f060..176827a4b1dc7 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -3,7 +3,7 @@ package postgresql_extensible import ( "bytes" "fmt" - "io/ioutil" + "io" "os" "strings" "time" @@ -147,7 +147,7 @@ func ReadQueryFromFile(filePath string) (string, error) { } defer file.Close() - query, err := ioutil.ReadAll(file) + query, err := io.ReadAll(file) if err != nil { return "", err } diff --git 
a/plugins/inputs/processes/processes_notwindows.go b/plugins/inputs/processes/processes_notwindows.go index 3c685cf1ebf7f..070dce65fe2a0 100644 --- a/plugins/inputs/processes/processes_notwindows.go +++ b/plugins/inputs/processes/processes_notwindows.go @@ -6,7 +6,6 @@ package processes import ( "bytes" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -192,7 +191,7 @@ func (p *Processes) gatherFromProc(fields map[string]interface{}) error { } func readProcFile(filename string) ([]byte, error) { - data, err := ioutil.ReadFile(filename) + data, err := os.ReadFile(filename) if err != nil { if os.IsNotExist(err) { return nil, nil diff --git a/plugins/inputs/procstat/native_finder.go b/plugins/inputs/procstat/native_finder.go index d5d8b8b36fe70..05cf4a72735f0 100644 --- a/plugins/inputs/procstat/native_finder.go +++ b/plugins/inputs/procstat/native_finder.go @@ -2,7 +2,7 @@ package procstat import ( "fmt" - "io/ioutil" + "os" "regexp" "strconv" "strings" @@ -43,7 +43,7 @@ func (pg *NativeFinder) UID(user string) ([]PID, error) { //PidFile returns the pid from the pid file given. func (pg *NativeFinder) PidFile(path string) ([]PID, error) { var pids []PID - pidString, err := ioutil.ReadFile(path) + pidString, err := os.ReadFile(path) if err != nil { return pids, fmt.Errorf("Failed to read pidfile '%s'. Error: '%s'", path, err) diff --git a/plugins/inputs/procstat/pgrep.go b/plugins/inputs/procstat/pgrep.go index 85e8d80f83cfe..34c44e0b2fefb 100644 --- a/plugins/inputs/procstat/pgrep.go +++ b/plugins/inputs/procstat/pgrep.go @@ -2,7 +2,7 @@ package procstat import ( "fmt" - "io/ioutil" + "os" "os/exec" "strconv" "strings" @@ -25,7 +25,7 @@ func NewPgrep() (PIDFinder, error) { func (pg *Pgrep) PidFile(path string) ([]PID, error) { var pids []PID - pidString, err := ioutil.ReadFile(path) + pidString, err := os.ReadFile(path) if err != nil { return pids, fmt.Errorf("Failed to read pidfile '%s'. 
Error: '%s'", path, err) diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index b838df651f636..ce29a08460cca 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -3,7 +3,6 @@ package procstat import ( "bytes" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -516,7 +515,7 @@ func (p *Procstat) singleCgroupPIDs(path string) ([]PID, error) { return nil, fmt.Errorf("not a directory %s", path) } procsPath := filepath.Join(path, "cgroup.procs") - out, err := ioutil.ReadFile(procsPath) + out, err := os.ReadFile(procsPath) if err != nil { return nil, err } diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go index 2d8687e75013b..bc586fca4fa42 100644 --- a/plugins/inputs/procstat/procstat_test.go +++ b/plugins/inputs/procstat/procstat_test.go @@ -2,7 +2,6 @@ package procstat import ( "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -385,10 +384,10 @@ func TestGather_cgroupPIDs(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("no cgroups in windows") } - td, err := ioutil.TempDir("", "") + td, err := os.MkdirTemp("", "") require.NoError(t, err) defer os.RemoveAll(td) - err = ioutil.WriteFile(filepath.Join(td, "cgroup.procs"), []byte("1234\n5678\n"), 0644) + err = os.WriteFile(filepath.Join(td, "cgroup.procs"), []byte("1234\n5678\n"), 0644) require.NoError(t, err) p := Procstat{ diff --git a/plugins/inputs/prometheus/kubernetes.go b/plugins/inputs/prometheus/kubernetes.go index 0e658003a7122..a57e771bfc483 100644 --- a/plugins/inputs/prometheus/kubernetes.go +++ b/plugins/inputs/prometheus/kubernetes.go @@ -5,11 +5,11 @@ import ( "crypto/tls" "encoding/json" "fmt" - "io/ioutil" "log" "net" "net/http" "net/url" + "os" "os/user" "path/filepath" "time" @@ -41,7 +41,7 @@ const cAdvisorPodListDefaultInterval = 60 // loadClient parses a kubeconfig from a file and returns a Kubernetes // client. 
It does not support extensions or client auth providers. func loadClient(kubeconfigPath string) (*kubernetes.Clientset, error) { - data, err := ioutil.ReadFile(kubeconfigPath) + data, err := os.ReadFile(kubeconfigPath) if err != nil { return nil, fmt.Errorf("failed reading '%s': %v", kubeconfigPath, err) } diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index adeb452253a37..136e8ae0f6d9d 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -4,7 +4,7 @@ import ( "context" "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -382,7 +382,7 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error p.addHeaders(req) if p.BearerToken != "" { - token, err := ioutil.ReadFile(p.BearerToken) + token, err := os.ReadFile(p.BearerToken) if err != nil { return err } @@ -408,7 +408,7 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error return fmt.Errorf("%s returned HTTP status %s", u.URL, resp.Status) } - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return fmt.Errorf("error reading body: %s", err) } diff --git a/plugins/inputs/proxmox/proxmox.go b/plugins/inputs/proxmox/proxmox.go index ec34a7b2f5a36..efd7fae7d5d5f 100644 --- a/plugins/inputs/proxmox/proxmox.go +++ b/plugins/inputs/proxmox/proxmox.go @@ -3,7 +3,7 @@ package proxmox import ( "encoding/json" "errors" - "io/ioutil" + "io" "net/http" "net/url" "os" @@ -115,7 +115,7 @@ func performRequest(px *Proxmox, apiURL string, method string, data url.Values) } defer resp.Body.Close() - responseBody, err := ioutil.ReadAll(resp.Body) + responseBody, err := io.ReadAll(resp.Body) if err != nil { return nil, err } diff --git a/plugins/inputs/puppetagent/puppetagent.go b/plugins/inputs/puppetagent/puppetagent.go index 36c284ff57cb6..f31e03d327817 100644 --- a/plugins/inputs/puppetagent/puppetagent.go +++ 
b/plugins/inputs/puppetagent/puppetagent.go @@ -2,12 +2,12 @@ package puppetagent import ( "fmt" - "gopkg.in/yaml.v2" - "io/ioutil" "os" "reflect" "strings" + "gopkg.in/yaml.v2" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -102,7 +102,7 @@ func (pa *PuppetAgent) Gather(acc telegraf.Accumulator) error { return fmt.Errorf("%s", err) } - fh, err := ioutil.ReadFile(pa.Location) + fh, err := os.ReadFile(pa.Location) if err != nil { return fmt.Errorf("%s", err) } diff --git a/plugins/inputs/rabbitmq/rabbitmq.go b/plugins/inputs/rabbitmq/rabbitmq.go index 13be5f63b1619..158b8d5ed6b21 100644 --- a/plugins/inputs/rabbitmq/rabbitmq.go +++ b/plugins/inputs/rabbitmq/rabbitmq.go @@ -3,7 +3,7 @@ package rabbitmq import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "strconv" "sync" @@ -431,7 +431,7 @@ func (r *RabbitMQ) requestEndpoint(u string) ([]byte, error) { return nil, fmt.Errorf("getting %q failed: %v %v", u, resp.StatusCode, http.StatusText(resp.StatusCode)) } - return ioutil.ReadAll(resp.Body) + return io.ReadAll(resp.Body) } func (r *RabbitMQ) requestJSON(u string, target interface{}) error { diff --git a/plugins/inputs/rabbitmq/rabbitmq_test.go b/plugins/inputs/rabbitmq/rabbitmq_test.go index 830819b0528e4..e867b1e2dcb61 100644 --- a/plugins/inputs/rabbitmq/rabbitmq_test.go +++ b/plugins/inputs/rabbitmq/rabbitmq_test.go @@ -2,9 +2,9 @@ package rabbitmq import ( "fmt" - "io/ioutil" "net/http" "net/http/httptest" + "os" "time" "testing" @@ -37,7 +37,7 @@ func TestRabbitMQGeneratesMetricsSet1(t *testing.T) { return } - data, err := ioutil.ReadFile(jsonFilePath) + data, err := os.ReadFile(jsonFilePath) require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) _, err = w.Write(data) @@ -247,7 +247,7 @@ func TestRabbitMQGeneratesMetricsSet2(t *testing.T) { return } - data, err := ioutil.ReadFile(jsonFilePath) + data, err := os.ReadFile(jsonFilePath) require.NoErrorf(t, err, "could not read from data file 
%s", jsonFilePath) _, err = w.Write(data) diff --git a/plugins/inputs/ravendb/ravendb_test.go b/plugins/inputs/ravendb/ravendb_test.go index 42eaea3fb3e3b..3da1d0190a055 100644 --- a/plugins/inputs/ravendb/ravendb_test.go +++ b/plugins/inputs/ravendb/ravendb_test.go @@ -1,9 +1,9 @@ package ravendb import ( - "io/ioutil" "net/http" "net/http/httptest" + "os" "testing" "time" @@ -30,7 +30,7 @@ func TestRavenDBGeneratesMetricsFull(t *testing.T) { require.Failf(t, "Cannot handle request for uri %s", r.URL.Path) } - data, err := ioutil.ReadFile(jsonFilePath) + data, err := os.ReadFile(jsonFilePath) require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) _, err = w.Write(data) @@ -225,7 +225,7 @@ func TestRavenDBGeneratesMetricsMin(t *testing.T) { require.Failf(t, "Cannot handle request for uri %s", r.URL.Path) } - data, err := ioutil.ReadFile(jsonFilePath) + data, err := os.ReadFile(jsonFilePath) require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) _, err = w.Write(data) diff --git a/plugins/inputs/redfish/redfish.go b/plugins/inputs/redfish/redfish.go index 4d9e70a57a9bd..dcf26b192c651 100644 --- a/plugins/inputs/redfish/redfish.go +++ b/plugins/inputs/redfish/redfish.go @@ -3,7 +3,7 @@ package redfish import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -199,7 +199,7 @@ func (r *Redfish) getData(url string, payload interface{}) error { r.Address) } - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return err } diff --git a/plugins/inputs/salesforce/salesforce.go b/plugins/inputs/salesforce/salesforce.go index f1ecff8d61a83..f7c321d7ae978 100644 --- a/plugins/inputs/salesforce/salesforce.go +++ b/plugins/inputs/salesforce/salesforce.go @@ -6,7 +6,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strings" @@ -203,11 +202,11 @@ func (s *Salesforce) login() error { defer resp.Body.Close() if resp.StatusCode != http.StatusOK { // ignore the 
err here; LimitReader returns io.EOF and we're not interested in read errors. - body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(resp.Body, 200)) return fmt.Errorf("%s returned HTTP status %s: %q", loginEndpoint, resp.Status, body) } - respBody, err := ioutil.ReadAll(resp.Body) + respBody, err := io.ReadAll(resp.Body) if err != nil { return err } diff --git a/plugins/inputs/snmp_legacy/snmp_legacy.go b/plugins/inputs/snmp_legacy/snmp_legacy.go index d85afca8e4e7f..604a2205c0d2c 100644 --- a/plugins/inputs/snmp_legacy/snmp_legacy.go +++ b/plugins/inputs/snmp_legacy/snmp_legacy.go @@ -1,9 +1,9 @@ package snmp_legacy import ( - "io/ioutil" "log" "net" + "os" "strconv" "strings" "time" @@ -296,7 +296,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { subnodes: make(map[string]Node), } - data, err := ioutil.ReadFile(s.SnmptranslateFile) + data, err := os.ReadFile(s.SnmptranslateFile) if err != nil { s.Log.Errorf("Reading SNMPtranslate file error: %s", err.Error()) return err diff --git a/plugins/inputs/socket_listener/socket_listener_test.go b/plugins/inputs/socket_listener/socket_listener_test.go index c33e59f7129b6..a3ccacae1ceb2 100644 --- a/plugins/inputs/socket_listener/socket_listener_test.go +++ b/plugins/inputs/socket_listener/socket_listener_test.go @@ -4,7 +4,6 @@ import ( "bytes" "crypto/tls" "io" - "io/ioutil" "log" "net" "os" @@ -69,7 +68,7 @@ func TestSocketListener_tcp_tls(t *testing.T) { } func TestSocketListener_unix_tls(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "sl.TestSocketListener_unix_tls.sock") @@ -133,7 +132,7 @@ func TestSocketListener_udp(t *testing.T) { } func TestSocketListener_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer 
os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "sl.TestSocketListener_unix.sock") @@ -163,7 +162,7 @@ func TestSocketListener_unixgram(t *testing.T) { t.Skip("Skipping on Windows, as unixgram sockets are not supported") } - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "sl.TestSocketListener_unixgram.sock") diff --git a/plugins/inputs/sql/sql.go b/plugins/inputs/sql/sql.go index c6c4658d83959..87227663bb4d0 100644 --- a/plugins/inputs/sql/sql.go +++ b/plugins/inputs/sql/sql.go @@ -5,7 +5,7 @@ import ( dbsql "database/sql" "errors" "fmt" - "io/ioutil" + "os" "sort" "strings" "sync" @@ -326,7 +326,7 @@ func (s *SQL) Init() error { // In case we got a script, we should read the query now. if q.Script != "" { - query, err := ioutil.ReadFile(q.Script) + query, err := os.ReadFile(q.Script) if err != nil { return fmt.Errorf("reading script %q failed: %v", q.Script, err) } diff --git a/plugins/inputs/suricata/suricata_test.go b/plugins/inputs/suricata/suricata_test.go index 9b620efc3e216..f3fc5f14eb394 100644 --- a/plugins/inputs/suricata/suricata_test.go +++ b/plugins/inputs/suricata/suricata_test.go @@ -2,7 +2,6 @@ package suricata import ( "fmt" - "io/ioutil" "log" "math/rand" "net" @@ -21,7 +20,7 @@ var ex2 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats"," var ex3 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats":{"threads": { "W#05-wlp4s0": { "capture":{"kernel_packets":905344474,"kernel_drops":78355440}}}}}` func TestSuricataLarge(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -38,7 +37,7 @@ func TestSuricataLarge(t *testing.T) { require.NoError(t, s.Start(&acc)) defer s.Stop() - data, err := 
ioutil.ReadFile("testdata/test1.json") + data, err := os.ReadFile("testdata/test1.json") require.NoError(t, err) c, err := net.Dial("unix", tmpfn) @@ -49,7 +48,7 @@ func TestSuricataLarge(t *testing.T) { require.NoError(t, err) //test suricata alerts - data2, err := ioutil.ReadFile("testdata/test2.json") + data2, err := os.ReadFile("testdata/test2.json") require.NoError(t, err) _, err = c.Write(data2) require.NoError(t, err) @@ -61,7 +60,7 @@ func TestSuricataLarge(t *testing.T) { } func TestSuricataAlerts(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -78,7 +77,7 @@ func TestSuricataAlerts(t *testing.T) { require.NoError(t, s.Start(&acc)) defer s.Stop() - data, err := ioutil.ReadFile("testdata/test3.json") + data, err := os.ReadFile("testdata/test3.json") require.NoError(t, err) c, err := net.Dial("unix", tmpfn) @@ -116,7 +115,7 @@ func TestSuricataAlerts(t *testing.T) { } func TestSuricata(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -162,7 +161,7 @@ func TestSuricata(t *testing.T) { } func TestThreadStats(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -212,7 +211,7 @@ func TestThreadStats(t *testing.T) { } func TestSuricataInvalid(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -254,7 +253,7 @@ func TestSuricataInvalidPath(t *testing.T) { } func TestSuricataTooLongLine(t *testing.T) { - dir, err := ioutil.TempDir("", "test") 
+ dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -282,7 +281,7 @@ func TestSuricataTooLongLine(t *testing.T) { } func TestSuricataEmptyJSON(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -309,7 +308,7 @@ func TestSuricataEmptyJSON(t *testing.T) { } func TestSuricataDisconnectSocket(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -345,7 +344,7 @@ func TestSuricataDisconnectSocket(t *testing.T) { } func TestSuricataStartStop(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -387,7 +386,7 @@ func TestSuricataParse(t *testing.T) { } for _, tc := range tests { - data, err := ioutil.ReadFile("testdata/" + tc.filename) + data, err := os.ReadFile("testdata/" + tc.filename) require.NoError(t, err) s := Suricata{ Delimiter: "_", diff --git a/plugins/inputs/synproxy/synproxy_test.go b/plugins/inputs/synproxy/synproxy_test.go index dd733253635b8..e8fbe62989055 100644 --- a/plugins/inputs/synproxy/synproxy_test.go +++ b/plugins/inputs/synproxy/synproxy_test.go @@ -4,7 +4,6 @@ package synproxy import ( - "io/ioutil" "os" "testing" @@ -156,7 +155,7 @@ func testSynproxyFileData(t *testing.T, fileData string, telegrafData map[string } func makeFakeSynproxyFile(content []byte) string { - tmpfile, err := ioutil.TempFile("", "synproxy_test") + tmpfile, err := os.CreateTemp("", "synproxy_test") if err != nil { panic(err) } diff --git a/plugins/inputs/syslog/nontransparent_test.go 
b/plugins/inputs/syslog/nontransparent_test.go index 4d29daaf53915..7782ad968a3b1 100644 --- a/plugins/inputs/syslog/nontransparent_test.go +++ b/plugins/inputs/syslog/nontransparent_test.go @@ -2,7 +2,6 @@ package syslog import ( "crypto/tls" - "io/ioutil" "net" "os" "path/filepath" @@ -270,7 +269,7 @@ func TestNonTransparentStrictWithZeroKeepAlive_tcp_tls(t *testing.T) { } func TestNonTransparentStrict_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestStrict_unix.sock") @@ -278,7 +277,7 @@ func TestNonTransparentStrict_unix(t *testing.T) { } func TestNonTransparentBestEffort_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix.sock") @@ -286,7 +285,7 @@ func TestNonTransparentBestEffort_unix(t *testing.T) { } func TestNonTransparentStrict_unix_tls(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestStrict_unix_tls.sock") @@ -294,7 +293,7 @@ func TestNonTransparentStrict_unix_tls(t *testing.T) { } func TestNonTransparentBestEffort_unix_tls(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix_tls.sock") diff --git a/plugins/inputs/syslog/octetcounting_test.go b/plugins/inputs/syslog/octetcounting_test.go index 53fee69d112a5..1c0cc024507e2 100644 --- a/plugins/inputs/syslog/octetcounting_test.go +++ b/plugins/inputs/syslog/octetcounting_test.go @@ -3,7 +3,6 @@ package syslog import ( "crypto/tls" "fmt" - "io/ioutil" "net" "os" 
"path/filepath" @@ -470,7 +469,7 @@ func TestOctetCountingStrictWithZeroKeepAlive_tcp_tls(t *testing.T) { } func TestOctetCountingStrict_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestStrict_unix.sock") @@ -478,7 +477,7 @@ func TestOctetCountingStrict_unix(t *testing.T) { } func TestOctetCountingBestEffort_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix.sock") @@ -486,7 +485,7 @@ func TestOctetCountingBestEffort_unix(t *testing.T) { } func TestOctetCountingStrict_unix_tls(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestStrict_unix_tls.sock") @@ -494,7 +493,7 @@ func TestOctetCountingStrict_unix_tls(t *testing.T) { } func TestOctetCountingBestEffort_unix_tls(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix_tls.sock") diff --git a/plugins/inputs/syslog/rfc5426_test.go b/plugins/inputs/syslog/rfc5426_test.go index ab3fe2ceaf60f..5bcb847b36ec4 100644 --- a/plugins/inputs/syslog/rfc5426_test.go +++ b/plugins/inputs/syslog/rfc5426_test.go @@ -2,7 +2,6 @@ package syslog import ( "fmt" - "io/ioutil" "net" "os" "path/filepath" @@ -290,7 +289,7 @@ func TestBestEffort_unixgram(t *testing.T) { t.Skip("Skipping on Windows, as unixgram sockets are not supported") } - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := 
filepath.Join(tmpdir, "syslog.TestBestEffort_unixgram.sock") @@ -304,7 +303,7 @@ func TestStrict_unixgram(t *testing.T) { t.Skip("Skipping on Windows, as unixgram sockets are not supported") } - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestStrict_unixgram.sock") diff --git a/plugins/inputs/syslog/syslog_test.go b/plugins/inputs/syslog/syslog_test.go index ac0539d30e1af..00146fde9cd26 100644 --- a/plugins/inputs/syslog/syslog_test.go +++ b/plugins/inputs/syslog/syslog_test.go @@ -1,7 +1,6 @@ package syslog import ( - "io/ioutil" "os" "path/filepath" "runtime" @@ -46,7 +45,7 @@ func TestAddress(t *testing.T) { require.EqualError(t, err, "unknown protocol 'unsupported' in 'example.com:6514'") require.Error(t, err) - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") defer os.RemoveAll(tmpdir) require.NoError(t, err) sock := filepath.Join(tmpdir, "syslog.TestAddress.sock") diff --git a/plugins/inputs/tail/tail_test.go b/plugins/inputs/tail/tail_test.go index 16c38519a83b6..1098a10edbff5 100644 --- a/plugins/inputs/tail/tail_test.go +++ b/plugins/inputs/tail/tail_test.go @@ -2,7 +2,6 @@ package tail import ( "bytes" - "io/ioutil" "log" "os" "path/filepath" @@ -49,7 +48,7 @@ func NewTestTail() *Tail { } func TestTailBadLine(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) @@ -86,7 +85,7 @@ func TestTailBadLine(t *testing.T) { } func TestTailDosLineEndings(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) _, err = tmpfile.WriteString("cpu usage_idle=100\r\ncpu2 usage_idle=200\r\n") @@ -173,7 +172,7 @@ func TestGrokParseLogFilesWithMultiline(t *testing.T) { } func 
TestGrokParseLogFilesWithMultilineTimeout(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) @@ -286,7 +285,7 @@ func createGrokParser() (parsers.Parser, error) { // The csv parser should only parse the header line once per file. func TestCSVHeadersParsedOnce(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) @@ -345,7 +344,7 @@ cpu,42 // Ensure that the first line can produce multiple metrics (#6138) func TestMultipleMetricsOnFirstLine(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) @@ -542,7 +541,7 @@ func TestCharacterEncoding(t *testing.T) { } func TestTailEOF(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) _, err = tmpfile.WriteString("cpu usage_idle=100\r\n") diff --git a/plugins/inputs/twemproxy/twemproxy.go b/plugins/inputs/twemproxy/twemproxy.go index cda56943f1002..b4c4b52f85b6c 100644 --- a/plugins/inputs/twemproxy/twemproxy.go +++ b/plugins/inputs/twemproxy/twemproxy.go @@ -3,7 +3,7 @@ package twemproxy import ( "encoding/json" "errors" - "io/ioutil" + "io" "net" "time" @@ -37,7 +37,7 @@ func (t *Twemproxy) Gather(acc telegraf.Accumulator) error { if err != nil { return err } - body, err := ioutil.ReadAll(conn) + body, err := io.ReadAll(conn) if err != nil { return err } diff --git a/plugins/inputs/udp_listener/udp_listener_test.go b/plugins/inputs/udp_listener/udp_listener_test.go index 8bd8262c035b0..3e36838c6192a 100644 --- a/plugins/inputs/udp_listener/udp_listener_test.go +++ b/plugins/inputs/udp_listener/udp_listener_test.go @@ -104,7 +104,7 @@ package udp_listener // } // func TestRunParser(t *testing.T) { -// 
log.SetOutput(ioutil.Discard) +// log.SetOutput(io.Discard) // var testmsg = []byte("cpu_load_short,host=server01 value=12.0 1422568543702900257\n") // listener, in := newTestUDPListener() @@ -127,7 +127,7 @@ package udp_listener // } // func TestRunParserInvalidMsg(_ *testing.T) { -// log.SetOutput(ioutil.Discard) +// log.SetOutput(io.Discard) // var testmsg = []byte("cpu_load_short") // listener, in := newTestUDPListener() @@ -153,7 +153,7 @@ package udp_listener // } // func TestRunParserGraphiteMsg(t *testing.T) { -// log.SetOutput(ioutil.Discard) +// log.SetOutput(io.Discard) // var testmsg = []byte("cpu.load.graphite 12 1454780029") // listener, in := newTestUDPListener() @@ -174,7 +174,7 @@ package udp_listener // } // func TestRunParserJSONMsg(t *testing.T) { -// log.SetOutput(ioutil.Discard) +// log.SetOutput(io.Discard) // var testmsg = []byte("{\"a\": 5, \"b\": {\"c\": 6}}\n") // listener, in := newTestUDPListener() diff --git a/plugins/inputs/webhooks/filestack/filestack_webhooks.go b/plugins/inputs/webhooks/filestack/filestack_webhooks.go index 19f8c0251bbb7..44def8c6f5141 100644 --- a/plugins/inputs/webhooks/filestack/filestack_webhooks.go +++ b/plugins/inputs/webhooks/filestack/filestack_webhooks.go @@ -2,7 +2,7 @@ package filestack import ( "encoding/json" - "io/ioutil" + "io" "log" "net/http" "time" @@ -25,7 +25,7 @@ func (fs *FilestackWebhook) Register(router *mux.Router, acc telegraf.Accumulato func (fs *FilestackWebhook) eventHandler(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusBadRequest) return diff --git a/plugins/inputs/webhooks/github/github_webhooks.go b/plugins/inputs/webhooks/github/github_webhooks.go index 5febb80afb6bb..2d48cbef2e5f2 100644 --- a/plugins/inputs/webhooks/github/github_webhooks.go +++ b/plugins/inputs/webhooks/github/github_webhooks.go @@ -5,7 +5,7 @@ import ( "crypto/sha1" "encoding/hex" 
"encoding/json" - "io/ioutil" + "io" "log" "net/http" @@ -28,7 +28,7 @@ func (gh *GithubWebhook) Register(router *mux.Router, acc telegraf.Accumulator) func (gh *GithubWebhook) eventHandler(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() eventType := r.Header.Get("X-Github-Event") - data, err := ioutil.ReadAll(r.Body) + data, err := io.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusBadRequest) return diff --git a/plugins/inputs/webhooks/mandrill/mandrill_webhooks.go b/plugins/inputs/webhooks/mandrill/mandrill_webhooks.go index a7e219c53c905..67ba86908d1a1 100644 --- a/plugins/inputs/webhooks/mandrill/mandrill_webhooks.go +++ b/plugins/inputs/webhooks/mandrill/mandrill_webhooks.go @@ -2,7 +2,7 @@ package mandrill import ( "encoding/json" - "io/ioutil" + "io" "log" "net/http" "net/url" @@ -31,7 +31,7 @@ func (md *MandrillWebhook) returnOK(w http.ResponseWriter, _ *http.Request) { func (md *MandrillWebhook) eventHandler(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusBadRequest) return diff --git a/plugins/inputs/webhooks/rollbar/rollbar_webhooks.go b/plugins/inputs/webhooks/rollbar/rollbar_webhooks.go index 55ff7eb2f3594..d9c1323cdd608 100644 --- a/plugins/inputs/webhooks/rollbar/rollbar_webhooks.go +++ b/plugins/inputs/webhooks/rollbar/rollbar_webhooks.go @@ -3,7 +3,7 @@ package rollbar import ( "encoding/json" "errors" - "io/ioutil" + "io" "log" "net/http" "time" @@ -25,7 +25,7 @@ func (rb *RollbarWebhook) Register(router *mux.Router, acc telegraf.Accumulator) func (rb *RollbarWebhook) eventHandler(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() - data, err := ioutil.ReadAll(r.Body) + data, err := io.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusBadRequest) return diff --git a/plugins/inputs/wireless/wireless_linux.go b/plugins/inputs/wireless/wireless_linux.go index 
706f9700d12c9..29a0250d92b7f 100644 --- a/plugins/inputs/wireless/wireless_linux.go +++ b/plugins/inputs/wireless/wireless_linux.go @@ -5,7 +5,6 @@ package wireless import ( "bytes" - "io/ioutil" "log" "os" "path" @@ -47,7 +46,7 @@ func (w *Wireless) Gather(acc telegraf.Accumulator) error { w.loadPath() wirelessPath := path.Join(w.HostProc, "net", "wireless") - table, err := ioutil.ReadFile(wirelessPath) + table, err := os.ReadFile(wirelessPath) if err != nil { return err } diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go index b106f91b772f6..3486f2779eb2b 100644 --- a/plugins/inputs/x509_cert/x509_cert.go +++ b/plugins/inputs/x509_cert/x509_cert.go @@ -7,14 +7,15 @@ import ( "crypto/x509" "encoding/pem" "fmt" - "github.com/pion/dtls/v2" - "io/ioutil" "net" "net/url" + "os" "path/filepath" "strings" "time" + "github.com/pion/dtls/v2" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/globpath" @@ -176,7 +177,7 @@ func (c *X509Cert) getCert(u *url.URL, timeout time.Duration) ([]*x509.Certifica return certs, nil case "file": - content, err := ioutil.ReadFile(u.Path) + content, err := os.ReadFile(u.Path) if err != nil { return nil, err } diff --git a/plugins/inputs/x509_cert/x509_cert_test.go b/plugins/inputs/x509_cert/x509_cert_test.go index 9c42c09bdabda..f0b0379109749 100644 --- a/plugins/inputs/x509_cert/x509_cert_test.go +++ b/plugins/inputs/x509_cert/x509_cert_test.go @@ -4,8 +4,6 @@ import ( "crypto/tls" "encoding/base64" "fmt" - "github.com/pion/dtls/v2" - "io/ioutil" "math/big" "net" "net/url" @@ -15,6 +13,8 @@ import ( "testing" "time" + "github.com/pion/dtls/v2" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -32,7 +32,7 @@ var _ telegraf.Input = &X509Cert{} func TestGatherRemoteIntegration(t *testing.T) { t.Skip("Skipping network-dependent test due to race condition when test-all") - tmpfile, err := ioutil.TempFile("", 
"example") + tmpfile, err := os.CreateTemp("", "example") require.NoError(t, err) defer os.Remove(tmpfile.Name()) @@ -149,7 +149,7 @@ func TestGatherLocal(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - f, err := ioutil.TempFile("", "x509_cert") + f, err := os.CreateTemp("", "x509_cert") require.NoError(t, err) _, err = f.Write([]byte(test.content)) @@ -181,7 +181,7 @@ func TestGatherLocal(t *testing.T) { func TestTags(t *testing.T) { cert := fmt.Sprintf("%s\n%s", pki.ReadServerCert(), pki.ReadCACert()) - f, err := ioutil.TempFile("", "x509_cert") + f, err := os.CreateTemp("", "x509_cert") require.NoError(t, err) _, err = f.Write([]byte(cert)) @@ -238,7 +238,7 @@ func TestGatherChain(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - f, err := ioutil.TempFile("", "x509_cert") + f, err := os.CreateTemp("", "x509_cert") require.NoError(t, err) _, err = f.Write([]byte(test.content)) diff --git a/plugins/inputs/zfs/zfs_linux_test.go b/plugins/inputs/zfs/zfs_linux_test.go index 52622582029a5..b844759eaffd1 100644 --- a/plugins/inputs/zfs/zfs_linux_test.go +++ b/plugins/inputs/zfs/zfs_linux_test.go @@ -4,7 +4,6 @@ package zfs import ( - "io/ioutil" "os" "testing" @@ -192,10 +191,10 @@ func TestZfsPoolMetrics(t *testing.T) { err = os.MkdirAll(testKstatPath+"/HOME", 0755) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/HOME/io", []byte(poolIoContents), 0644) + err = os.WriteFile(testKstatPath+"/HOME/io", []byte(poolIoContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/arcstats", []byte(arcstatsContents), 0644) + err = os.WriteFile(testKstatPath+"/arcstats", []byte(arcstatsContents), 0644) require.NoError(t, err) poolMetrics := getPoolMetrics() @@ -231,25 +230,25 @@ func TestZfsGeneratesMetrics(t *testing.T) { err = os.MkdirAll(testKstatPath+"/HOME", 0755) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/HOME/io", []byte(""), 0644) + err = 
os.WriteFile(testKstatPath+"/HOME/io", []byte(""), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/arcstats", []byte(arcstatsContents), 0644) + err = os.WriteFile(testKstatPath+"/arcstats", []byte(arcstatsContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/zfetchstats", []byte(zfetchstatsContents), 0644) + err = os.WriteFile(testKstatPath+"/zfetchstats", []byte(zfetchstatsContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/zil", []byte(zilContents), 0644) + err = os.WriteFile(testKstatPath+"/zil", []byte(zilContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/fm", []byte(fmContents), 0644) + err = os.WriteFile(testKstatPath+"/fm", []byte(fmContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/dmu_tx", []byte(dmuTxContents), 0644) + err = os.WriteFile(testKstatPath+"/dmu_tx", []byte(dmuTxContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/abdstats", []byte(abdstatsContents), 0644) + err = os.WriteFile(testKstatPath+"/abdstats", []byte(abdstatsContents), 0644) require.NoError(t, err) intMetrics := getKstatMetricsAll() @@ -272,7 +271,7 @@ func TestZfsGeneratesMetrics(t *testing.T) { err = os.MkdirAll(testKstatPath+"/STORAGE", 0755) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/STORAGE/io", []byte(""), 0644) + err = os.WriteFile(testKstatPath+"/STORAGE/io", []byte(""), 0644) require.NoError(t, err) tags = map[string]string{ diff --git a/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go b/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go index 9bf1f3261d9f6..09518103b22cc 100644 --- a/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go +++ b/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go @@ -29,8 +29,8 @@ import ( "errors" "flag" "fmt" - "io/ioutil" "log" + "os" "github.com/apache/thrift/lib/go/thrift" 
"github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore" @@ -52,7 +52,7 @@ func init() { func main() { flag.Parse() - contents, err := ioutil.ReadFile(filename) + contents, err := os.ReadFile(filename) if err != nil { log.Fatalf("Error reading file: %v\n", err) } @@ -63,7 +63,7 @@ func main() { if err != nil { log.Fatalf("%v\n", err) } - if err := ioutil.WriteFile(outFileName, raw, 0644); err != nil { + if err := os.WriteFile(outFileName, raw, 0644); err != nil { log.Fatalf("%v", err) } case "thrift": @@ -71,7 +71,7 @@ func main() { if err != nil { log.Fatalf("%v\n", err) } - if err := ioutil.WriteFile(outFileName, raw, 0644); err != nil { + if err := os.WriteFile(outFileName, raw, 0644); err != nil { log.Fatalf("%v", err) } default: diff --git a/plugins/inputs/zipkin/codec/thrift/thrift_test.go b/plugins/inputs/zipkin/codec/thrift/thrift_test.go index d4bbc1d54df20..ea566e4bfd0c8 100644 --- a/plugins/inputs/zipkin/codec/thrift/thrift_test.go +++ b/plugins/inputs/zipkin/codec/thrift/thrift_test.go @@ -1,7 +1,7 @@ package thrift import ( - "io/ioutil" + "os" "testing" "github.com/google/go-cmp/cmp" @@ -193,7 +193,7 @@ func TestUnmarshalThrift(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - dat, err := ioutil.ReadFile(tt.filename) + dat, err := os.ReadFile(tt.filename) if err != nil { t.Fatalf("Could not find file %s\n", tt.filename) } diff --git a/plugins/inputs/zipkin/handler.go b/plugins/inputs/zipkin/handler.go index 24e7ac12f01be..83288bd6e4b2e 100644 --- a/plugins/inputs/zipkin/handler.go +++ b/plugins/inputs/zipkin/handler.go @@ -3,7 +3,7 @@ package zipkin import ( "compress/gzip" "fmt" - "io/ioutil" + "io" "mime" "net/http" "strings" @@ -88,7 +88,7 @@ func (s *SpanHandler) Spans(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusUnsupportedMediaType) } - octets, err := ioutil.ReadAll(body) + octets, err := io.ReadAll(body) if err != nil { s.recorder.Error(err) 
w.WriteHeader(http.StatusInternalServerError) diff --git a/plugins/inputs/zipkin/handler_test.go b/plugins/inputs/zipkin/handler_test.go index b0176a22ead3c..f6e8bece80240 100644 --- a/plugins/inputs/zipkin/handler_test.go +++ b/plugins/inputs/zipkin/handler_test.go @@ -2,9 +2,10 @@ package zipkin import ( "bytes" - "io/ioutil" + "io" "net/http" "net/http/httptest" + "os" "strconv" "testing" "time" @@ -28,7 +29,7 @@ func (m *MockRecorder) Error(err error) { } func TestSpanHandler(t *testing.T) { - dat, err := ioutil.ReadFile("testdata/threespans.dat") + dat, err := os.ReadFile("testdata/threespans.dat") if err != nil { t.Fatalf("Could not find file %s\n", "testdata/threespans.dat") } @@ -37,7 +38,7 @@ func TestSpanHandler(t *testing.T) { r := httptest.NewRequest( "POST", "http://server.local/api/v1/spans", - ioutil.NopCloser( + io.NopCloser( bytes.NewReader(dat))) r.Header.Set("Content-Type", "application/x-thrift") diff --git a/plugins/inputs/zipkin/zipkin_test.go b/plugins/inputs/zipkin/zipkin_test.go index 77bef853b7e52..0c0bab279cc7f 100644 --- a/plugins/inputs/zipkin/zipkin_test.go +++ b/plugins/inputs/zipkin/zipkin_test.go @@ -3,8 +3,8 @@ package zipkin import ( "bytes" "fmt" - "io/ioutil" "net/http" + "os" "testing" "time" @@ -637,7 +637,7 @@ func TestZipkinPlugin(t *testing.T) { } func postThriftData(datafile, address, contentType string) error { - dat, err := ioutil.ReadFile(datafile) + dat, err := os.ReadFile(datafile) if err != nil { return fmt.Errorf("could not read from data file %s", datafile) } diff --git a/plugins/outputs/azure_monitor/azure_monitor.go b/plugins/outputs/azure_monitor/azure_monitor.go index e513dbdca23e9..ca511a5211860 100644 --- a/plugins/outputs/azure_monitor/azure_monitor.go +++ b/plugins/outputs/azure_monitor/azure_monitor.go @@ -7,7 +7,7 @@ import ( "encoding/json" "fmt" "hash/fnv" - "io/ioutil" + "io" "net/http" "regexp" "strings" @@ -221,7 +221,7 @@ func vmInstanceMetadata(c *http.Client) (string, string, error) { } defer 
resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return "", "", err } @@ -356,7 +356,7 @@ func (a *AzureMonitor) send(body []byte) error { } defer resp.Body.Close() - _, err = ioutil.ReadAll(resp.Body) + _, err = io.ReadAll(resp.Body) if err != nil || resp.StatusCode < 200 || resp.StatusCode > 299 { return fmt.Errorf("failed to write batch: [%v] %s", resp.StatusCode, resp.Status) } diff --git a/plugins/outputs/dynatrace/dynatrace.go b/plugins/outputs/dynatrace/dynatrace.go index 11796e8e12994..adf74ea48a232 100644 --- a/plugins/outputs/dynatrace/dynatrace.go +++ b/plugins/outputs/dynatrace/dynatrace.go @@ -3,7 +3,7 @@ package dynatrace import ( "bytes" "fmt" - "io/ioutil" + "io" "net/http" "strings" "time" @@ -209,7 +209,7 @@ func (d *Dynatrace) send(msg string) error { } // print metric line results as info log - bodyBytes, err := ioutil.ReadAll(resp.Body) + bodyBytes, err := io.ReadAll(resp.Body) if err != nil { d.Log.Errorf("Dynatrace error reading response") } diff --git a/plugins/outputs/dynatrace/dynatrace_test.go b/plugins/outputs/dynatrace/dynatrace_test.go index c3cb091cbf549..0ed7cf4cf1195 100644 --- a/plugins/outputs/dynatrace/dynatrace_test.go +++ b/plugins/outputs/dynatrace/dynatrace_test.go @@ -3,7 +3,7 @@ package dynatrace import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "regexp" @@ -130,7 +130,7 @@ func TestSendMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) + bodyBytes, err := io.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) @@ -209,7 +209,7 @@ func TestSendMetrics(t *testing.T) { func TestSendSingleMetricWithUnorderedTags(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // check the encoded result - bodyBytes, err := 
ioutil.ReadAll(r.Body) + bodyBytes, err := io.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) // use regex because dimension order isn't guaranteed @@ -255,7 +255,7 @@ func TestSendMetricWithoutTags(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) + bodyBytes, err := io.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) expected := "mymeasurement.myfield,dt.metrics.source=telegraf gauge,3.14 1289430000000" @@ -296,7 +296,7 @@ func TestSendMetricWithUpperCaseTagKeys(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) + bodyBytes, err := io.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) @@ -343,7 +343,7 @@ func TestSendBooleanMetricWithoutTags(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) + bodyBytes, err := io.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) // use regex because field order isn't guaranteed @@ -384,7 +384,7 @@ func TestSendMetricWithDefaultDimensions(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) + bodyBytes, err := io.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) // use regex because field order isn't guaranteed @@ -427,7 +427,7 @@ func TestMetricDimensionsOverrideDefault(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) // check the 
encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) + bodyBytes, err := io.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) // use regex because field order isn't guaranteed @@ -470,7 +470,7 @@ func TestStaticDimensionsOverrideMetric(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) + bodyBytes, err := io.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) // use regex because field order isn't guaranteed diff --git a/plugins/outputs/file/file_test.go b/plugins/outputs/file/file_test.go index f1e87853d6153..5fcdc511972ac 100644 --- a/plugins/outputs/file/file_test.go +++ b/plugins/outputs/file/file_test.go @@ -3,7 +3,6 @@ package file import ( "bytes" "io" - "io/ioutil" "os" "testing" @@ -181,7 +180,7 @@ func TestFileStdout(t *testing.T) { } func createFile() *os.File { - f, err := ioutil.TempFile("", "") + f, err := os.CreateTemp("", "") if err != nil { panic(err) } @@ -190,7 +189,7 @@ func createFile() *os.File { } func tmpFile() string { - d, err := ioutil.TempDir("", "") + d, err := os.MkdirTemp("", "") if err != nil { panic(err) } @@ -198,7 +197,7 @@ func tmpFile() string { } func validateFile(fname, expS string, t *testing.T) { - buf, err := ioutil.ReadFile(fname) + buf, err := os.ReadFile(fname) if err != nil { panic(err) } diff --git a/plugins/outputs/health/health_test.go b/plugins/outputs/health/health_test.go index f03cfcacba7a6..03a08fca21e7b 100644 --- a/plugins/outputs/health/health_test.go +++ b/plugins/outputs/health/health_test.go @@ -1,7 +1,7 @@ package health_test import ( - "io/ioutil" + "io" "net/http" "testing" "time" @@ -121,7 +121,7 @@ func TestHealth(t *testing.T) { require.NoError(t, err) require.Equal(t, tt.expectedCode, resp.StatusCode) - _, err = ioutil.ReadAll(resp.Body) + _, err = io.ReadAll(resp.Body) require.NoError(t, err) err = 
output.Close() diff --git a/plugins/outputs/http/http.go b/plugins/outputs/http/http.go index edaae3f6ec07d..c94052ea92c1c 100644 --- a/plugins/outputs/http/http.go +++ b/plugins/outputs/http/http.go @@ -6,7 +6,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "net/http" "strings" "time" @@ -195,7 +194,7 @@ func (h *HTTP) write(reqBody []byte) error { return fmt.Errorf("when writing to [%s] received status code: %d. body: %s", h.URL, resp.StatusCode, errorLine) } - _, err = ioutil.ReadAll(resp.Body) + _, err = io.ReadAll(resp.Body) if err != nil { return fmt.Errorf("when writing to [%s] received error: %v", h.URL, err) } diff --git a/plugins/outputs/http/http_test.go b/plugins/outputs/http/http_test.go index 8089f45f59f2e..d6803eed3211d 100644 --- a/plugins/outputs/http/http_test.go +++ b/plugins/outputs/http/http_test.go @@ -3,7 +3,7 @@ package http import ( "compress/gzip" "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "net/url" @@ -272,7 +272,7 @@ func TestContentEncodingGzip(t *testing.T) { require.NoError(t, err) } - payload, err := ioutil.ReadAll(body) + payload, err := io.ReadAll(body) require.NoError(t, err) require.Contains(t, string(payload), "cpu value=42") diff --git a/plugins/outputs/influxdb/http.go b/plugins/outputs/influxdb/http.go index 5c11d2821d2f1..ac85814db1f34 100644 --- a/plugins/outputs/influxdb/http.go +++ b/plugins/outputs/influxdb/http.go @@ -8,7 +8,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net" "net/http" "net/url" @@ -489,7 +488,7 @@ func (c *httpClient) requestBodyReader(metrics []telegraf.Metric) (io.ReadCloser return rc, nil } - return ioutil.NopCloser(reader), nil + return io.NopCloser(reader), nil } func (c *httpClient) addHeaders(req *http.Request) { @@ -503,13 +502,13 @@ func (c *httpClient) addHeaders(req *http.Request) { } func (c *httpClient) validateResponse(response io.ReadCloser) (io.ReadCloser, error) { - bodyBytes, err := ioutil.ReadAll(response) + bodyBytes, err := io.ReadAll(response) if err != nil { 
return nil, err } defer response.Close() - originalResponse := ioutil.NopCloser(bytes.NewBuffer(bodyBytes)) + originalResponse := io.NopCloser(bytes.NewBuffer(bodyBytes)) // Empty response is valid. if response == http.NoBody || len(bodyBytes) == 0 || bodyBytes == nil { diff --git a/plugins/outputs/influxdb/http_test.go b/plugins/outputs/influxdb/http_test.go index e19d8d2e580c9..ba4dd2d81b12a 100644 --- a/plugins/outputs/influxdb/http_test.go +++ b/plugins/outputs/influxdb/http_test.go @@ -6,7 +6,7 @@ import ( "compress/gzip" "context" "fmt" - "io/ioutil" + "io" "log" "net" "net/http" @@ -284,7 +284,7 @@ func TestHTTP_Write(t *testing.T) { }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, r.FormValue("db"), "telegraf") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Contains(t, string(body), "cpu value=42") w.WriteHeader(http.StatusNoContent) @@ -573,7 +573,7 @@ func TestHTTP_WriteContentEncodingGzip(t *testing.T) { gr, err := gzip.NewReader(r.Body) require.NoError(t, err) - body, err := ioutil.ReadAll(gr) + body, err := io.ReadAll(gr) require.NoError(t, err) require.Contains(t, string(body), "cpu value=42") @@ -618,7 +618,7 @@ func TestHTTP_WriteContentEncodingGzip(t *testing.T) { } func TestHTTP_UnixSocket(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf-test") + tmpdir, err := os.MkdirTemp("", "telegraf-test") if err != nil { require.NoError(t, err) } @@ -700,7 +700,7 @@ func TestHTTP_WriteDatabaseTagWorksOnRetry(t *testing.T) { r.ParseForm() require.Equal(t, r.Form["db"], []string{"foo"}) - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Contains(t, string(body), "cpu value=42") @@ -835,7 +835,7 @@ func TestDBRPTags(t *testing.T) { handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, r.FormValue("db"), "telegraf") require.Equal(t, r.FormValue("rp"), 
"foo") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Contains(t, string(body), "cpu,rp=foo value=42") w.WriteHeader(http.StatusNoContent) @@ -917,7 +917,7 @@ func TestDBRPTags(t *testing.T) { handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, r.FormValue("db"), "telegraf") require.Equal(t, r.FormValue("rp"), "foo") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Contains(t, string(body), "cpu value=42") w.WriteHeader(http.StatusNoContent) @@ -948,7 +948,7 @@ func TestDBRPTags(t *testing.T) { handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, r.FormValue("db"), "telegraf") require.Equal(t, r.FormValue("rp"), "foo") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Contains(t, string(body), "cpu,rp=foo value=42") w.WriteHeader(http.StatusNoContent) diff --git a/plugins/outputs/influxdb_v2/http.go b/plugins/outputs/influxdb_v2/http.go index e8df4da7d2041..c076580255740 100644 --- a/plugins/outputs/influxdb_v2/http.go +++ b/plugins/outputs/influxdb_v2/http.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "log" "math" "net" @@ -361,7 +360,7 @@ func (c *httpClient) requestBodyReader(metrics []telegraf.Metric) (io.ReadCloser return rc, nil } - return ioutil.NopCloser(reader), nil + return io.NopCloser(reader), nil } func (c *httpClient) addHeaders(req *http.Request) { diff --git a/plugins/outputs/influxdb_v2/http_test.go b/plugins/outputs/influxdb_v2/http_test.go index 23c3ff05e17b6..0637cd8060bd0 100644 --- a/plugins/outputs/influxdb_v2/http_test.go +++ b/plugins/outputs/influxdb_v2/http_test.go @@ -2,7 +2,7 @@ package influxdb_v2_test import ( "context" - "io/ioutil" + "io" "net/http" "net/http/httptest" "net/url" @@ -63,7 +63,7 @@ func TestWriteBucketTagWorksOnRetry(t *testing.T) { r.ParseForm() require.Equal(t, 
r.Form["bucket"], []string{"foo"}) - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Contains(t, string(body), "cpu value=42") diff --git a/plugins/outputs/librato/librato.go b/plugins/outputs/librato/librato.go index d4aa3e6e92bb7..dc1e9b6fa7856 100644 --- a/plugins/outputs/librato/librato.go +++ b/plugins/outputs/librato/librato.go @@ -4,7 +4,7 @@ import ( "bytes" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "regexp" "time" @@ -151,7 +151,7 @@ func (l *Librato) Write(metrics []telegraf.Metric) error { defer resp.Body.Close() if resp.StatusCode != 200 || l.Debug { - htmlData, err := ioutil.ReadAll(resp.Body) + htmlData, err := io.ReadAll(resp.Body) if err != nil { l.Log.Debugf("Couldn't get response! (%v)", err) } diff --git a/plugins/outputs/loki/loki_test.go b/plugins/outputs/loki/loki_test.go index efe31728218d7..ba6d0808fabaa 100644 --- a/plugins/outputs/loki/loki_test.go +++ b/plugins/outputs/loki/loki_test.go @@ -4,14 +4,15 @@ import ( "compress/gzip" "encoding/json" "fmt" - "github.com/influxdata/telegraf/testutil" - "io/ioutil" + "io" "net/http" "net/http/httptest" "net/url" "testing" "time" + "github.com/influxdata/telegraf/testutil" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/stretchr/testify/require" @@ -215,7 +216,7 @@ func TestContentEncodingGzip(t *testing.T) { require.NoError(t, err) } - payload, err := ioutil.ReadAll(body) + payload, err := io.ReadAll(body) require.NoError(t, err) var s Request @@ -394,7 +395,7 @@ func TestMetricSorting(t *testing.T) { body := r.Body var err error - payload, err := ioutil.ReadAll(body) + payload, err := io.ReadAll(body) require.NoError(t, err) var s Request diff --git a/plugins/outputs/opentsdb/opentsdb_http.go b/plugins/outputs/opentsdb/opentsdb_http.go index b164765850578..582a9bb85fc9a 100644 --- a/plugins/outputs/opentsdb/opentsdb_http.go +++ b/plugins/outputs/opentsdb/opentsdb_http.go @@ -6,7 +6,6 @@ 
import ( "encoding/json" "fmt" "io" - "io/ioutil" "log" "net/http" "net/http/httputil" @@ -163,7 +162,7 @@ func (o *openTSDBHttp) flush() error { fmt.Printf("Received response\n%s\n\n", dump) } else { // Important so http client reuse connection for next request if need be. - io.Copy(ioutil.Discard, resp.Body) + _, _ = io.Copy(io.Discard, resp.Body) } if resp.StatusCode/100 != 2 { diff --git a/plugins/outputs/prometheus_client/prometheus_client_v1_test.go b/plugins/outputs/prometheus_client/prometheus_client_v1_test.go index 39b8fec262095..95fa97fb688b7 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_v1_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_v1_test.go @@ -2,7 +2,7 @@ package prometheus import ( "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "net/url" @@ -261,7 +261,7 @@ rpc_duration_seconds_count 2693 require.NoError(t, err) require.Equal(t, http.StatusOK, resp.StatusCode) defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) require.NoError(t, err) require.Equal(t, @@ -392,7 +392,7 @@ rpc_duration_seconds_count 2693 resp, err := http.Get(output.URL()) require.NoError(t, err) - actual, err := ioutil.ReadAll(resp.Body) + actual, err := io.ReadAll(resp.Body) require.NoError(t, err) require.Equal(t, @@ -422,7 +422,7 @@ func TestLandingPage(t *testing.T) { resp, err := http.Get(u.String()) require.NoError(t, err) - actual, err := ioutil.ReadAll(resp.Body) + actual, err := io.ReadAll(resp.Body) require.NoError(t, err) require.Equal(t, expected, strings.TrimSpace(string(actual))) diff --git a/plugins/outputs/prometheus_client/prometheus_client_v2_test.go b/plugins/outputs/prometheus_client/prometheus_client_v2_test.go index 27be9103b28bd..c5ff76d4017a7 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_v2_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_v2_test.go @@ -2,7 +2,7 @@ package prometheus import ( "fmt" - "io/ioutil" + "io" 
"net/http" "net/http/httptest" "strings" @@ -321,7 +321,7 @@ cpu_usage_idle_count{cpu="cpu1"} 20 require.NoError(t, err) require.Equal(t, http.StatusOK, resp.StatusCode) defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) require.NoError(t, err) require.Equal(t, @@ -452,7 +452,7 @@ rpc_duration_seconds_count 2693 resp, err := http.Get(output.URL()) require.NoError(t, err) - actual, err := ioutil.ReadAll(resp.Body) + actual, err := io.ReadAll(resp.Body) require.NoError(t, err) require.Equal(t, diff --git a/plugins/outputs/sensu/sensu.go b/plugins/outputs/sensu/sensu.go index 568f8f7a144e4..3cd8b2274e52a 100644 --- a/plugins/outputs/sensu/sensu.go +++ b/plugins/outputs/sensu/sensu.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "math" "net/http" "net/url" @@ -336,7 +335,7 @@ func (s *Sensu) write(reqBody []byte) error { defer resp.Body.Close() if resp.StatusCode != http.StatusCreated { - bodyData, err := ioutil.ReadAll(resp.Body) + bodyData, err := io.ReadAll(resp.Body) if err != nil { s.Log.Debugf("Couldn't read response body: %v", err) } diff --git a/plugins/outputs/sensu/sensu_test.go b/plugins/outputs/sensu/sensu_test.go index 249775727a481..e7a272ed5e149 100644 --- a/plugins/outputs/sensu/sensu_test.go +++ b/plugins/outputs/sensu/sensu_test.go @@ -3,7 +3,7 @@ package sensu import ( "encoding/json" "fmt" - "io/ioutil" + "io" "math" "net/http" "net/http/httptest" @@ -118,7 +118,7 @@ func TestConnectAndWrite(t *testing.T) { require.Equal(t, expectedURL, r.URL.String()) require.Equal(t, expectedAuthHeader, r.Header.Get("Authorization")) // let's make sure what we received is a valid Sensu event that contains all of the expected data - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) receivedEvent := &corev2.Event{} err = json.Unmarshal(body, receivedEvent) diff --git a/plugins/outputs/socket_writer/socket_writer_test.go 
b/plugins/outputs/socket_writer/socket_writer_test.go index 3c20583e15e20..0decb644cccab 100644 --- a/plugins/outputs/socket_writer/socket_writer_test.go +++ b/plugins/outputs/socket_writer/socket_writer_test.go @@ -2,7 +2,6 @@ package socket_writer import ( "bufio" - "io/ioutil" "net" "os" "path/filepath" @@ -46,7 +45,7 @@ func TestSocketWriter_udp(t *testing.T) { } func TestSocketWriter_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "sw.TestSocketWriter_unix.sock") @@ -71,7 +70,7 @@ func TestSocketWriter_unixgram(t *testing.T) { t.Skip("Skipping on Windows, as unixgram sockets are not supported") } - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "sw.TSW_unixgram.sock") diff --git a/plugins/outputs/sql/sql_test.go b/plugins/outputs/sql/sql_test.go index 5dad6752d4cfe..ef02c89b11fad 100644 --- a/plugins/outputs/sql/sql_test.go +++ b/plugins/outputs/sql/sql_test.go @@ -3,7 +3,6 @@ package sql import ( "context" "fmt" - "io/ioutil" "math/rand" "os" "path/filepath" @@ -162,7 +161,7 @@ func TestMysqlIntegration(t *testing.T) { const username = "root" password := pwgen(32) - outDir, err := ioutil.TempDir("", "tg-mysql-*") + outDir, err := os.MkdirTemp("", "tg-mysql-*") require.NoError(t, err) defer os.RemoveAll(outDir) @@ -230,9 +229,9 @@ func TestMysqlIntegration(t *testing.T) { require.FileExists(t, dumpfile) //compare the dump to what we expected - expected, err := ioutil.ReadFile("testdata/mariadb/expected.sql") + expected, err := os.ReadFile("testdata/mariadb/expected.sql") require.NoError(t, err) - actual, err := ioutil.ReadFile(dumpfile) + actual, err := os.ReadFile(dumpfile) require.NoError(t, err) require.Equal(t, string(expected), string(actual)) } @@ -252,7 +251,7 @@ func 
TestPostgresIntegration(t *testing.T) { const username = "postgres" password := pwgen(32) - outDir, err := ioutil.TempDir("", "tg-postgres-*") + outDir, err := os.MkdirTemp("", "tg-postgres-*") require.NoError(t, err) defer os.RemoveAll(outDir) @@ -329,9 +328,9 @@ func TestPostgresIntegration(t *testing.T) { require.FileExists(t, dumpfile) //compare the dump to what we expected - expected, err := ioutil.ReadFile("testdata/postgres/expected.sql") + expected, err := os.ReadFile("testdata/postgres/expected.sql") require.NoError(t, err) - actual, err := ioutil.ReadFile(dumpfile) + actual, err := os.ReadFile(dumpfile) require.NoError(t, err) require.Equal(t, string(expected), string(actual)) } diff --git a/plugins/outputs/sql/sqlite_test.go b/plugins/outputs/sql/sqlite_test.go index d54ffe877a80f..7707f9d085e7e 100644 --- a/plugins/outputs/sql/sqlite_test.go +++ b/plugins/outputs/sql/sqlite_test.go @@ -7,7 +7,6 @@ package sql import ( gosql "database/sql" - "io/ioutil" "os" "path/filepath" "testing" @@ -18,7 +17,7 @@ import ( ) func TestSqlite(t *testing.T) { - outDir, err := ioutil.TempDir("", "tg-sqlite-*") + outDir, err := os.MkdirTemp("", "tg-sqlite-*") require.NoError(t, err) defer os.RemoveAll(outDir) diff --git a/plugins/outputs/sumologic/sumologic_test.go b/plugins/outputs/sumologic/sumologic_test.go index 5ce502bab2c0e..5629defa4506e 100644 --- a/plugins/outputs/sumologic/sumologic_test.go +++ b/plugins/outputs/sumologic/sumologic_test.go @@ -6,7 +6,6 @@ import ( "compress/gzip" "fmt" "io" - "io/ioutil" "net/http" "net/http/httptest" "net/url" @@ -300,7 +299,7 @@ func TestContentEncodingGzip(t *testing.T) { body, err := gzip.NewReader(r.Body) require.NoError(t, err) - payload, err := ioutil.ReadAll(body) + payload, err := io.ReadAll(body) require.NoError(t, err) assert.Equal(t, string(payload), "metric=cpu field=value 42 0\n") diff --git a/plugins/outputs/warp10/warp10.go b/plugins/outputs/warp10/warp10.go index 7826047d7873d..4d3027b1b5331 100644 --- 
a/plugins/outputs/warp10/warp10.go +++ b/plugins/outputs/warp10/warp10.go @@ -3,7 +3,7 @@ package warp10 import ( "bytes" "fmt" - "io/ioutil" + "io" "log" "math" "net/http" @@ -154,7 +154,7 @@ func (w *Warp10) Write(metrics []telegraf.Metric) error { if resp.StatusCode != http.StatusOK { if w.PrintErrorBody { - body, _ := ioutil.ReadAll(resp.Body) + body, _ := io.ReadAll(resp.Body) return fmt.Errorf(w.WarpURL + ": " + w.HandleError(string(body), w.MaxStringErrorSize)) } diff --git a/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go index c6eb9db2ae5b5..dc097da45ac2a 100644 --- a/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go +++ b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go @@ -4,7 +4,7 @@ import ( "bytes" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "time" @@ -172,7 +172,7 @@ func getResponseFromMetadata(c *http.Client, metadataURL string) ([]byte, error) } defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } @@ -242,7 +242,7 @@ func (a *YandexCloudMonitoring) send(body []byte) error { } defer resp.Body.Close() - _, err = ioutil.ReadAll(resp.Body) + _, err = io.ReadAll(resp.Body) if err != nil || resp.StatusCode < 200 || resp.StatusCode > 299 { return fmt.Errorf("failed to write batch: [%v] %s", resp.StatusCode, resp.Status) } diff --git a/plugins/parsers/json_v2/parser_test.go b/plugins/parsers/json_v2/parser_test.go index f0f018034dc5b..7b34b83c0af8a 100644 --- a/plugins/parsers/json_v2/parser_test.go +++ b/plugins/parsers/json_v2/parser_test.go @@ -3,7 +3,6 @@ package json_v2_test import ( "bufio" "fmt" - "io/ioutil" "os" "testing" @@ -90,7 +89,7 @@ func TestData(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { // Process the telegraf config file for the test - buf, err := 
ioutil.ReadFile(fmt.Sprintf("testdata/%s/telegraf.conf", tc.test)) + buf, err := os.ReadFile(fmt.Sprintf("testdata/%s/telegraf.conf", tc.test)) require.NoError(t, err) inputs.Add("file", func() telegraf.Input { return &file.File{} diff --git a/plugins/parsers/prometheus/parser_test.go b/plugins/parsers/prometheus/parser_test.go index f53b926bda4a5..a403887e093b9 100644 --- a/plugins/parsers/prometheus/parser_test.go +++ b/plugins/parsers/prometheus/parser_test.go @@ -2,7 +2,7 @@ package prometheus import ( "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "testing" @@ -435,7 +435,7 @@ func TestParserProtobufHeader(t *testing.T) { t.Fatalf("error making HTTP request to %s: %s", ts.URL, err) } defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { t.Fatalf("error reading body: %s", err) } diff --git a/plugins/parsers/xpath/parser_test.go b/plugins/parsers/xpath/parser_test.go index 8e7a3087c0888..ead02e0392769 100644 --- a/plugins/parsers/xpath/parser_test.go +++ b/plugins/parsers/xpath/parser_test.go @@ -1,7 +1,7 @@ package xpath import ( - "io/ioutil" + "os" "path/filepath" "strings" "testing" @@ -1233,7 +1233,7 @@ func TestTestCases(t *testing.T) { pbmsgtype = protofields[1] } - content, err := ioutil.ReadFile(datafile) + content, err := os.ReadFile(datafile) require.NoError(t, err) // Get the expectations @@ -1266,7 +1266,7 @@ func TestTestCases(t *testing.T) { } func loadTestConfiguration(filename string) (*Config, []string, error) { - buf, err := ioutil.ReadFile(filename) + buf, err := os.ReadFile(filename) if err != nil { return nil, nil, err } diff --git a/plugins/processors/starlark/starlark_test.go b/plugins/processors/starlark/starlark_test.go index 15152a2f349c3..9eed069948bb0 100644 --- a/plugins/processors/starlark/starlark_test.go +++ b/plugins/processors/starlark/starlark_test.go @@ -3,7 +3,6 @@ package starlark import ( "errors" "fmt" - "io/ioutil" "os" "path/filepath" "strings" 
@@ -3194,7 +3193,7 @@ func TestAllScriptTestData(t *testing.T) { } fn := path t.Run(fn, func(t *testing.T) { - b, err := ioutil.ReadFile(fn) + b, err := os.ReadFile(fn) require.NoError(t, err) lines := strings.Split(string(b), "\n") inputMetrics := parseMetricsFrom(t, lines, "Example Input:") diff --git a/testutil/tls.go b/testutil/tls.go index 68a244a8b1e74..686f327d06f49 100644 --- a/testutil/tls.go +++ b/testutil/tls.go @@ -2,7 +2,7 @@ package testutil import ( "fmt" - "io/ioutil" + "io" "os" "path" @@ -93,7 +93,7 @@ func readCertificate(filename string) string { if err != nil { panic(fmt.Sprintf("opening %q: %v", filename, err)) } - octets, err := ioutil.ReadAll(file) + octets, err := io.ReadAll(file) if err != nil { panic(fmt.Sprintf("reading %q: %v", filename, err)) } From 56398237c4a931dba438db553a1ee1487926c072 Mon Sep 17 00:00:00 2001 From: helotpl Date: Tue, 28 Sep 2021 23:24:08 +0200 Subject: [PATCH 35/81] feat: telegraf to merge tables with different indexes (#9241) --- plugins/inputs/snmp/README.md | 107 +++++++++++ plugins/inputs/snmp/snmp.go | 65 +++++++ plugins/inputs/snmp/snmp_test.go | 248 ++++++++++++++++++++++++++ plugins/inputs/snmp/testdata/test.mib | 39 ++++ 4 files changed, 459 insertions(+) diff --git a/plugins/inputs/snmp/README.md b/plugins/inputs/snmp/README.md index 0d52881a72f04..3728cddb34349 100644 --- a/plugins/inputs/snmp/README.md +++ b/plugins/inputs/snmp/README.md @@ -201,6 +201,113 @@ One [metric][] is created for each row of the SNMP table. ## Specifies if the value of given field should be snmptranslated ## by default no field values are translated # translate = true + + ## Secondary index table allows to merge data from two tables with + ## different index that this filed will be used to join them. There can + ## be only one secondary index table. + # secondary_index_table = false + + ## This field is using secondary index, and will be later merged with + ## primary index using SecondaryIndexTable. 
SecondaryIndexTable and + ## SecondaryIndexUse are exclusive. + # secondary_index_use = false + + ## Controls if entries from secondary table should be added or not + ## if joining index is present or not. I set to true, means that join + ## is outer, and index is prepended with "Secondary." for missing values + ## to avoid overlaping indexes from both tables. Can be set per field or + ## globally with SecondaryIndexTable, global true overrides per field false. + # secondary_outer_join = false +``` + +##### Two Table Join +Snmp plugin can join two snmp tables that have different indexes. For this to work one table +should have translation field that return index of second table as value. Examples +of such fields are: + * Cisco portTable with translation field: `CISCO-STACK-MIB::portIfIndex`, +which value is IfIndex from ifTable + * Adva entityFacilityTable with translation field: `ADVA-FSPR7-MIB::entityFacilityOneIndex`, +which value is IfIndex from ifTable + * Cisco cpeExtPsePortTable with translation field: `CISCO-POWER-ETHERNET-EXT-MIB::cpeExtPsePortEntPhyIndex`, +which value is index from entPhysicalTable + +Such field can be used to translate index to secondary table with `secondary_index_table = true` +and all fields from secondary table (with index pointed from translation field), should have added option +`secondary_index_use = true`. Telegraf cannot duplicate entries during join so translation +must be 1-to-1 (not 1-to-many). To add fields from secondary table with index that is not present +in translation table (outer join), there is a second option for translation index `secondary_outer_join = true`. 
+ +###### Example configuration for table joins + +CISCO-POWER-ETHERNET-EXT-MIB table before join: +``` +[[inputs.snmp.table]] +name = "ciscoPower" +index_as_tag = true + +[[inputs.snmp.table.field]] +name = "PortPwrConsumption" +oid = "CISCO-POWER-ETHERNET-EXT-MIB::cpeExtPsePortPwrConsumption" + +[[inputs.snmp.table.field]] +name = "EntPhyIndex" +oid = "CISCO-POWER-ETHERNET-EXT-MIB::cpeExtPsePortEntPhyIndex" +``` + +Partial result (removed agent_host and host columns from all following outputs in this section): +``` +> ciscoPower,index=1.2 EntPhyIndex=1002i,PortPwrConsumption=6643i 1621460628000000000 +> ciscoPower,index=1.6 EntPhyIndex=1006i,PortPwrConsumption=10287i 1621460628000000000 +> ciscoPower,index=1.5 EntPhyIndex=1005i,PortPwrConsumption=8358i 1621460628000000000 +``` + +Note here that EntPhyIndex column carries index from ENTITY-MIB table, config for it: +``` +[[inputs.snmp.table]] +name = "entityTable" +index_as_tag = true + +[[inputs.snmp.table.field]] +name = "EntPhysicalName" +oid = "ENTITY-MIB::entPhysicalName" +``` +Partial result: +``` +> entityTable,index=1006 EntPhysicalName="GigabitEthernet1/6" 1621460809000000000 +> entityTable,index=1002 EntPhysicalName="GigabitEthernet1/2" 1621460809000000000 +> entityTable,index=1005 EntPhysicalName="GigabitEthernet1/5" 1621460809000000000 +``` + +Now, lets attempt to join these results into one table. EntPhyIndex matches index +from second table, and lets convert EntPhysicalName into tag, so second table will +only provide tags into result. 
Configuration: + +``` +[[inputs.snmp.table]] +name = "ciscoPowerEntity" +index_as_tag = true + +[[inputs.snmp.table.field]] +name = "PortPwrConsumption" +oid = "CISCO-POWER-ETHERNET-EXT-MIB::cpeExtPsePortPwrConsumption" + +[[inputs.snmp.table.field]] +name = "EntPhyIndex" +oid = "CISCO-POWER-ETHERNET-EXT-MIB::cpeExtPsePortEntPhyIndex" +secondary_index_table = true # enables joining + +[[inputs.snmp.table.field]] +name = "EntPhysicalName" +oid = "ENTITY-MIB::entPhysicalName" +secondary_index_use = true # this tag is indexed from secondary table +is_tag = true +``` + +Result: +``` +> ciscoPowerEntity,EntPhysicalName=GigabitEthernet1/2,index=1.2 EntPhyIndex=1002i,PortPwrConsumption=6643i 1621461148000000000 +> ciscoPowerEntity,EntPhysicalName=GigabitEthernet1/6,index=1.6 EntPhyIndex=1006i,PortPwrConsumption=10287i 1621461148000000000 +> ciscoPowerEntity,EntPhysicalName=GigabitEthernet1/5,index=1.5 EntPhyIndex=1005i,PortPwrConsumption=8358i 1621461148000000000 ``` ### Troubleshooting diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 7f2df6b689eac..a2259e88179c2 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -187,11 +187,18 @@ func (t *Table) Init() error { return err } + secondaryIndexTablePresent := false // initialize all the nested fields for i := range t.Fields { if err := t.Fields[i].init(); err != nil { return fmt.Errorf("initializing field %s: %w", t.Fields[i].Name, err) } + if t.Fields[i].SecondaryIndexTable { + if secondaryIndexTablePresent { + return fmt.Errorf("only one field can be SecondaryIndexTable") + } + secondaryIndexTablePresent = true + } } t.initialized = true @@ -252,6 +259,19 @@ type Field struct { Conversion string // Translate tells if the value of the field should be snmptranslated Translate bool + // Secondary index table allows to merge data from two tables with different index + // that this filed will be used to join them. There can be only one secondary index table. 
+ SecondaryIndexTable bool + // This field is using secondary index, and will be later merged with primary index + // using SecondaryIndexTable. SecondaryIndexTable and SecondaryIndexUse are exclusive. + SecondaryIndexUse bool + // Controls if entries from secondary table should be added or not if joining + // index is present or not. I set to true, means that join is outer, and + // index is prepended with "Secondary." for missing values to avoid overlaping + // indexes from both tables. + // Can be set per field or globally with SecondaryIndexTable, global true overrides + // per field false. + SecondaryOuterJoin bool initialized bool } @@ -278,6 +298,14 @@ func (f *Field) init() error { //TODO use textual convention conversion from the MIB } + if f.SecondaryIndexTable && f.SecondaryIndexUse { + return fmt.Errorf("SecondaryIndexTable and UseSecondaryIndex are exclusive") + } + + if !f.SecondaryIndexTable && !f.SecondaryIndexUse && f.SecondaryOuterJoin { + return fmt.Errorf("SecondaryOuterJoin set to true, but field is not being used in join") + } + f.initialized = true return nil } @@ -414,6 +442,19 @@ func (s *Snmp) gatherTable(acc telegraf.Accumulator, gs snmpConnection, t Table, func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) { rows := map[string]RTableRow{} + //translation table for secondary index (when preforming join on two tables) + secIdxTab := make(map[string]string) + secGlobalOuterJoin := false + for i, f := range t.Fields { + if f.SecondaryIndexTable { + secGlobalOuterJoin = f.SecondaryOuterJoin + if i != 0 { + t.Fields[0], t.Fields[i] = t.Fields[i], t.Fields[0] + } + break + } + } + tagCount := 0 for _, f := range t.Fields { if f.IsTag { @@ -519,6 +560,16 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) { } for idx, v := range ifv { + if f.SecondaryIndexUse { + if newidx, ok := secIdxTab[idx]; ok { + idx = newidx + } else { + if !secGlobalOuterJoin && !f.SecondaryOuterJoin { + continue + } + idx = 
".Secondary" + idx + } + } rtr, ok := rows[idx] if !ok { rtr = RTableRow{} @@ -543,6 +594,20 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) { } else { rtr.Fields[f.Name] = v } + if f.SecondaryIndexTable { + //indexes are stored here with prepending "." so we need to add them if needed + var vss string + if ok { + vss = "." + vs + } else { + vss = fmt.Sprintf(".%v", v) + } + if idx[0] == '.' { + secIdxTab[vss] = idx + } else { + secIdxTab[vss] = "." + idx + } + } } } } diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index f447f13c54e67..49c9bf381b107 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -81,6 +81,15 @@ var tsc = &testSNMPConnection{ ".1.0.0.2.1.5.0.9.9": 11, ".1.0.0.2.1.5.1.9.9": 22, ".1.0.0.0.1.6.0": ".1.0.0.0.1.7", + ".1.0.0.3.1.1.10": "instance", + ".1.0.0.3.1.1.11": "instance2", + ".1.0.0.3.1.1.12": "instance3", + ".1.0.0.3.1.2.10": 10, + ".1.0.0.3.1.2.11": 20, + ".1.0.0.3.1.2.12": 20, + ".1.0.0.3.1.3.10": 1, + ".1.0.0.3.1.3.11": 2, + ".1.0.0.3.1.3.12": 3, }, } @@ -960,3 +969,242 @@ func TestSnmpTableCache_hit(t *testing.T) { assert.Equal(t, []Field{{Name: "d"}}, fields) assert.Equal(t, fmt.Errorf("e"), err) } + +func TestTableJoin_walk(t *testing.T) { + tbl := Table{ + Name: "mytable", + IndexAsTag: true, + Fields: []Field{ + { + Name: "myfield1", + Oid: ".1.0.0.3.1.1", + IsTag: true, + }, + { + Name: "myfield2", + Oid: ".1.0.0.3.1.2", + }, + { + Name: "myfield3", + Oid: ".1.0.0.3.1.3", + SecondaryIndexTable: true, + }, + { + Name: "myfield4", + Oid: ".1.0.0.0.1.1", + SecondaryIndexUse: true, + IsTag: true, + }, + { + Name: "myfield5", + Oid: ".1.0.0.0.1.2", + SecondaryIndexUse: true, + }, + }, + } + + tb, err := tbl.Build(tsc, true) + require.NoError(t, err) + + assert.Equal(t, tb.Name, "mytable") + rtr1 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance", + "myfield4": "bar", + "index": "10", + }, + Fields: map[string]interface{}{ + 
"myfield2": 10, + "myfield3": 1, + "myfield5": 2, + }, + } + rtr2 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance2", + "index": "11", + }, + Fields: map[string]interface{}{ + "myfield2": 20, + "myfield3": 2, + "myfield5": 0, + }, + } + rtr3 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance3", + "index": "12", + }, + Fields: map[string]interface{}{ + "myfield2": 20, + "myfield3": 3, + }, + } + assert.Len(t, tb.Rows, 3) + assert.Contains(t, tb.Rows, rtr1) + assert.Contains(t, tb.Rows, rtr2) + assert.Contains(t, tb.Rows, rtr3) +} + +func TestTableOuterJoin_walk(t *testing.T) { + tbl := Table{ + Name: "mytable", + IndexAsTag: true, + Fields: []Field{ + { + Name: "myfield1", + Oid: ".1.0.0.3.1.1", + IsTag: true, + }, + { + Name: "myfield2", + Oid: ".1.0.0.3.1.2", + }, + { + Name: "myfield3", + Oid: ".1.0.0.3.1.3", + SecondaryIndexTable: true, + SecondaryOuterJoin: true, + }, + { + Name: "myfield4", + Oid: ".1.0.0.0.1.1", + SecondaryIndexUse: true, + IsTag: true, + }, + { + Name: "myfield5", + Oid: ".1.0.0.0.1.2", + SecondaryIndexUse: true, + }, + }, + } + + tb, err := tbl.Build(tsc, true) + require.NoError(t, err) + + assert.Equal(t, tb.Name, "mytable") + rtr1 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance", + "myfield4": "bar", + "index": "10", + }, + Fields: map[string]interface{}{ + "myfield2": 10, + "myfield3": 1, + "myfield5": 2, + }, + } + rtr2 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance2", + "index": "11", + }, + Fields: map[string]interface{}{ + "myfield2": 20, + "myfield3": 2, + "myfield5": 0, + }, + } + rtr3 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance3", + "index": "12", + }, + Fields: map[string]interface{}{ + "myfield2": 20, + "myfield3": 3, + }, + } + rtr4 := RTableRow{ + Tags: map[string]string{ + "index": "Secondary.0", + "myfield4": "foo", + }, + Fields: map[string]interface{}{ + "myfield5": 1, + }, + } + assert.Len(t, tb.Rows, 4) + assert.Contains(t, tb.Rows, 
rtr1) + assert.Contains(t, tb.Rows, rtr2) + assert.Contains(t, tb.Rows, rtr3) + assert.Contains(t, tb.Rows, rtr4) +} + +func TestTableJoinNoIndexAsTag_walk(t *testing.T) { + tbl := Table{ + Name: "mytable", + IndexAsTag: false, + Fields: []Field{ + { + Name: "myfield1", + Oid: ".1.0.0.3.1.1", + IsTag: true, + }, + { + Name: "myfield2", + Oid: ".1.0.0.3.1.2", + }, + { + Name: "myfield3", + Oid: ".1.0.0.3.1.3", + SecondaryIndexTable: true, + }, + { + Name: "myfield4", + Oid: ".1.0.0.0.1.1", + SecondaryIndexUse: true, + IsTag: true, + }, + { + Name: "myfield5", + Oid: ".1.0.0.0.1.2", + SecondaryIndexUse: true, + }, + }, + } + + tb, err := tbl.Build(tsc, true) + require.NoError(t, err) + + assert.Equal(t, tb.Name, "mytable") + rtr1 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance", + "myfield4": "bar", + //"index": "10", + }, + Fields: map[string]interface{}{ + "myfield2": 10, + "myfield3": 1, + "myfield5": 2, + }, + } + rtr2 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance2", + //"index": "11", + }, + Fields: map[string]interface{}{ + "myfield2": 20, + "myfield3": 2, + "myfield5": 0, + }, + } + rtr3 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance3", + //"index": "12", + }, + Fields: map[string]interface{}{ + "myfield2": 20, + "myfield3": 3, + }, + } + assert.Len(t, tb.Rows, 3) + assert.Contains(t, tb.Rows, rtr1) + assert.Contains(t, tb.Rows, rtr2) + assert.Contains(t, tb.Rows, rtr3) +} diff --git a/plugins/inputs/snmp/testdata/test.mib b/plugins/inputs/snmp/testdata/test.mib index 7c3758d66d9a1..c6e7a2a8962b6 100644 --- a/plugins/inputs/snmp/testdata/test.mib +++ b/plugins/inputs/snmp/testdata/test.mib @@ -55,4 +55,43 @@ hostname OBJECT-TYPE STATUS current ::= { testOID 1 1 } +testSecondaryTable OBJECT-TYPE + SYNTAX SEQUENCE OF testSecondaryTableEntry + MAX-ACCESS not-accessible + STATUS current + ::= { testOID 3 } + +testSecondaryTableEntry OBJECT-TYPE + SYNTAX TestSecondaryTableEntry + MAX-ACCESS not-accessible + 
STATUS current + INDEX { + instance + } + ::= { testSecondaryTable 1 } + +TestSecondaryTableEntry ::= + SEQUENCE { + instance OCTET STRING, + connections INTEGER, + testTableIndex INTEGER, + } + +instance OBJECT-TYPE + SYNTAX OCTET STRING + MAX-ACCESS read-only + STATUS current + ::= { testSecondaryTableEntry 1 } + +connections OBJECT-TYPE + SYNTAX OCTET STRING + MAX-ACCESS read-only + STATUS current + ::= { testSecondaryTableEntry 2 } + +testTableIndex OBJECT-TYPE + SYNTAX OCTET STRING + MAX-ACCESS read-only + STATUS current + ::= { testSecondaryTableEntry 3 } END From e6155346203b9ecf95ef9a25994da76654e2b187 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 29 Sep 2021 07:50:44 -0600 Subject: [PATCH 36/81] feat: add debug query output to elasticsearch_query (#9827) --- .../inputs/elasticsearch_query/aggregation_query.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/plugins/inputs/elasticsearch_query/aggregation_query.go b/plugins/inputs/elasticsearch_query/aggregation_query.go index b5fa9db3c667a..51bdd98e7130b 100644 --- a/plugins/inputs/elasticsearch_query/aggregation_query.go +++ b/plugins/inputs/elasticsearch_query/aggregation_query.go @@ -2,6 +2,7 @@ package elasticsearch_query import ( "context" + "encoding/json" "fmt" "strings" "time" @@ -34,6 +35,16 @@ func (e *ElasticsearchQuery) runAggregationQuery(ctx context.Context, aggregatio query = query.Filter(elastic5.NewQueryStringQuery(filterQuery)) query = query.Filter(elastic5.NewRangeQuery(aggregation.DateField).From(from).To(now)) + src, err := query.Source() + if err != nil { + return nil, fmt.Errorf("failed to get query source - %v", err) + } + data, err := json.Marshal(src) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal response - %v", err) + } + e.Log.Debugf("{\"query\": %s}", string(data)) + search := e.esClient.Search().Index(aggregation.Index).Query(query).Size(0) // add only parent elastic.Aggregations to the search request, all the rest are subaggregations 
of these From 872b29bf958cf6c485f0d649b0540b0bae137a50 Mon Sep 17 00:00:00 2001 From: Alexander Krantz Date: Wed, 29 Sep 2021 14:39:46 -0700 Subject: [PATCH 37/81] fix: Couchbase insecure certificate validation (#9458) --- plugins/inputs/couchbase/README.md | 8 ++++++ plugins/inputs/couchbase/couchbase.go | 33 ++++++++++++++++++++-- plugins/inputs/couchbase/couchbase_test.go | 12 ++++++-- 3 files changed, 49 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/couchbase/README.md b/plugins/inputs/couchbase/README.md index 2c777e17a9ed0..1acdaea4ac76e 100644 --- a/plugins/inputs/couchbase/README.md +++ b/plugins/inputs/couchbase/README.md @@ -20,6 +20,14 @@ This plugin gets metrics for each Couchbase node, as well as detailed metrics fo ## Filter bucket fields to include only here. # bucket_stats_included = ["quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"] + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification (defaults to false) + ## If set to false, tls_cert and tls_key are required + # insecure_skip_verify = false ``` ## Measurements: diff --git a/plugins/inputs/couchbase/couchbase.go b/plugins/inputs/couchbase/couchbase.go index 7b99c76e6982c..f67e75096cde3 100644 --- a/plugins/inputs/couchbase/couchbase.go +++ b/plugins/inputs/couchbase/couchbase.go @@ -11,6 +11,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -20,6 +21,9 @@ type Couchbase struct { BucketStatsIncluded []string `toml:"bucket_stats_included"` bucketInclude filter.Filter + client *http.Client + + tls.ClientConfig } var sampleConfig = ` @@ -36,10 +40,17 @@ var sampleConfig = ` ## Filter bucket fields to include only here. 
# bucket_stats_included = ["quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"] + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification (defaults to false) + ## If set to false, tls_cert and tls_key are required + # insecure_skip_verify = false ` var regexpURI = regexp.MustCompile(`(\S+://)?(\S+\:\S+@)`) -var client = &http.Client{Timeout: 10 * time.Second} func (cb *Couchbase) SampleConfig() string { return sampleConfig @@ -369,7 +380,7 @@ func (cb *Couchbase) queryDetailedBucketStats(server, bucket string, bucketStats return err } - r, err := client.Do(req) + r, err := cb.client.Do(req) if err != nil { return err } @@ -387,6 +398,24 @@ func (cb *Couchbase) Init() error { cb.bucketInclude = f + tlsConfig, err := cb.TLSConfig() + if err != nil { + return err + } + + cb.client = &http.Client{ + Timeout: 10 * time.Second, + Transport: &http.Transport{ + MaxIdleConnsPerHost: couchbaseClient.MaxIdleConnsPerHost, + TLSClientConfig: tlsConfig, + }, + } + + couchbaseClient.SetSkipVerify(cb.ClientConfig.InsecureSkipVerify) + couchbaseClient.SetCertFile(cb.ClientConfig.TLSCert) + couchbaseClient.SetKeyFile(cb.ClientConfig.TLSKey) + couchbaseClient.SetRootFile(cb.ClientConfig.TLSCA) + return nil } diff --git a/plugins/inputs/couchbase/couchbase_test.go b/plugins/inputs/couchbase/couchbase_test.go index a739732458a51..e6abc3ea74c01 100644 --- a/plugins/inputs/couchbase/couchbase_test.go +++ b/plugins/inputs/couchbase/couchbase_test.go @@ -2,6 +2,7 @@ package couchbase import ( "encoding/json" + "github.com/influxdata/telegraf/plugins/common/tls" "net/http" "net/http/httptest" "testing" @@ -26,8 +27,12 @@ func TestGatherServer(t *testing.T) { } })) - var cb Couchbase - cb.BucketStatsIncluded = []string{"quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", 
"mem_used"} + cb := Couchbase{ + BucketStatsIncluded: []string{"quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"}, + ClientConfig: tls.ClientConfig{ + InsecureSkipVerify: true, + }, + } err := cb.Init() require.NoError(t, err) @@ -105,6 +110,9 @@ func TestGatherDetailedBucketMetrics(t *testing.T) { var err error var cb Couchbase cb.BucketStatsIncluded = []string{"couch_total_disk_size"} + cb.ClientConfig = tls.ClientConfig{ + InsecureSkipVerify: true, + } err = cb.Init() require.NoError(t, err) var acc testutil.Accumulator From 11193a3b4cbffd4ccdb7eb5b2aa12e83be729c11 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Wed, 29 Sep 2021 14:40:23 -0700 Subject: [PATCH 38/81] docs: update readme title for amd_rocm_smi (#9826) --- plugins/inputs/amd_rocm_smi/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/amd_rocm_smi/README.md b/plugins/inputs/amd_rocm_smi/README.md index 89a5b063065d7..ac080974dd274 100644 --- a/plugins/inputs/amd_rocm_smi/README.md +++ b/plugins/inputs/amd_rocm_smi/README.md @@ -1,11 +1,11 @@ -# ROCm System Management Interface (SMI) Input Plugin +# AMD ROCm System Management Interface (SMI) Input Plugin This plugin uses a query on the [`rocm-smi`](https://github.com/RadeonOpenCompute/rocm_smi_lib/tree/master/python_smi_tools) binary to pull GPU stats including memory and GPU usage, temperatures and other. 
### Configuration ```toml -# Pulls statistics from nvidia GPUs attached to the host +# Pulls statistics from AMD GPUs attached to the host [[inputs.amd_rocm_smi]] ## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath # bin_path = "/opt/rocm/bin/rocm-smi" From 70afc94d121c4bb75ded3f8177859436355c4dfa Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Thu, 30 Sep 2021 10:28:48 -0600 Subject: [PATCH 39/81] fix: Revert "Reset the flush interval timer when flush is requested or batch is ready. (#8953)" (#9800) This reverts commit a6d2c4f254dbe9f7353961d892f8b91d907423ea. --- agent/agent.go | 12 ++++++++---- agent/tick.go | 19 +++++++------------ 2 files changed, 15 insertions(+), 16 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 78097bcd47731..7bd6b108df048 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -775,7 +775,7 @@ func (a *Agent) runOutputs( func (a *Agent) flushLoop( ctx context.Context, output *models.RunningOutput, - ticker *RollingTicker, + ticker Ticker, ) { logError := func(err error) { if err != nil { @@ -804,11 +804,15 @@ func (a *Agent) flushLoop( case <-ticker.Elapsed(): logError(a.flushOnce(output, ticker, output.Write)) case <-flushRequested: - ticker.Reset() logError(a.flushOnce(output, ticker, output.Write)) case <-output.BatchReady: - ticker.Reset() - logError(a.flushOnce(output, ticker, output.WriteBatch)) + // Favor the ticker over batch ready + select { + case <-ticker.Elapsed(): + logError(a.flushOnce(output, ticker, output.Write)) + default: + logError(a.flushOnce(output, ticker, output.WriteBatch)) + } } } } diff --git a/agent/tick.go b/agent/tick.go index 9696cd2c18c16..16233ba6d4adb 100644 --- a/agent/tick.go +++ b/agent/tick.go @@ -214,7 +214,6 @@ type RollingTicker struct { ch chan time.Time cancel context.CancelFunc wg sync.WaitGroup - timer *clock.Timer } func NewRollingTicker(interval, jitter time.Duration) *RollingTicker { @@ -231,12 +230,12 @@ func newRollingTicker(interval, jitter 
time.Duration, clock clock.Clock) *Rollin } d := t.next() - t.timer = clock.Timer(d) + timer := clock.Timer(d) t.wg.Add(1) go func() { defer t.wg.Done() - t.run(ctx) + t.run(ctx, timer) }() return t @@ -246,28 +245,24 @@ func (t *RollingTicker) next() time.Duration { return t.interval + internal.RandomDuration(t.jitter) } -func (t *RollingTicker) run(ctx context.Context) { +func (t *RollingTicker) run(ctx context.Context, timer *clock.Timer) { for { select { case <-ctx.Done(): - t.timer.Stop() + timer.Stop() return - case now := <-t.timer.C: + case now := <-timer.C: select { case t.ch <- now: default: } - t.Reset() + d := t.next() + timer.Reset(d) } } } -// Reset the ticker to the next interval + jitter. -func (t *RollingTicker) Reset() { - t.timer.Reset(t.next()) -} - func (t *RollingTicker) Elapsed() <-chan time.Time { return t.ch } From 3990ab5eb9047c99b03a40afd3f02a90e7aabdb2 Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Fri, 1 Oct 2021 11:10:30 -0400 Subject: [PATCH 40/81] fix: add keep alive config option, add documentation around issue with eclipse/mosquitto version combined with this plugin, update test (#9803) --- plugins/outputs/mqtt/README.md | 7 +++++++ plugins/outputs/mqtt/mqtt.go | 21 +++++++++++++++++---- plugins/outputs/mqtt/mqtt_test.go | 1 + 3 files changed, 25 insertions(+), 4 deletions(-) diff --git a/plugins/outputs/mqtt/README.md b/plugins/outputs/mqtt/README.md index abb770f068d4f..f82d7597c5bea 100644 --- a/plugins/outputs/mqtt/README.md +++ b/plugins/outputs/mqtt/README.md @@ -40,6 +40,12 @@ This plugin writes to a [MQTT Broker](http://http://mqtt.org/) acting as a mqtt ## When true, messages will have RETAIN flag set. # retain = false + ## Defines the maximum length of time that the broker and client may not communicate. + ## Defaults to 0 which turns the feature off. 
For version v2.0.12 mosquitto there is a + ## [bug](https://github.com/eclipse/mosquitto/issues/2117) which requires keep_alive to be set. + ## As a reference eclipse/paho.mqtt.golang v1.3.0 defaults to 30. + # keep_alive = 0 + ## Data format to output. # data_format = "influx" ``` @@ -62,3 +68,4 @@ This plugin writes to a [MQTT Broker](http://http://mqtt.org/) acting as a mqtt * `batch`: When true, metrics will be sent in one MQTT message per flush. Otherwise, metrics are written one metric per MQTT message. * `retain`: Set `retain` flag when publishing * `data_format`: [About Telegraf data formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md) +* `keep_alive`: Defines the maximum length of time that the broker and client may not communicate with each other. Defaults to 0 which deactivates this feature. diff --git a/plugins/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go index 584a79ffd2ef1..54203ee0dba66 100644 --- a/plugins/outputs/mqtt/mqtt.go +++ b/plugins/outputs/mqtt/mqtt.go @@ -16,6 +16,10 @@ import ( "github.com/influxdata/telegraf/plugins/serializers" ) +const ( + defaultKeepAlive = 0 +) + var sampleConfig = ` servers = ["localhost:1883"] # required. @@ -55,6 +59,12 @@ var sampleConfig = ` ## actually reads it # retain = false + ## Defines the maximum length of time that the broker and client may not communicate. + ## Defaults to 0 which turns the feature off. For version v2.0.12 of eclipse/mosquitto there is a + ## [bug](https://github.com/eclipse/mosquitto/issues/2117) which requires keep_alive to be set. + ## As a reference eclipse/paho.mqtt.golang v1.3.0 defaults to 30. + # keep_alive = 0 + ## Data format to output. 
## Each data format has its own unique set of configuration options, read ## more about them here: @@ -72,8 +82,9 @@ type MQTT struct { QoS int `toml:"qos"` ClientID string `toml:"client_id"` tls.ClientConfig - BatchMessage bool `toml:"batch"` - Retain bool `toml:"retain"` + BatchMessage bool `toml:"batch"` + Retain bool `toml:"retain"` + KeepAlive int64 `toml:"keep_alive"` client paho.Client opts *paho.ClientOptions @@ -190,7 +201,7 @@ func (m *MQTT) publish(topic string, body []byte) error { func (m *MQTT) createOpts() (*paho.ClientOptions, error) { opts := paho.NewClientOptions() - opts.KeepAlive = 0 + opts.KeepAlive = m.KeepAlive if m.Timeout < config.Duration(time.Second) { m.Timeout = config.Duration(5 * time.Second) @@ -237,6 +248,8 @@ func (m *MQTT) createOpts() (*paho.ClientOptions, error) { func init() { outputs.Add("mqtt", func() telegraf.Output { - return &MQTT{} + return &MQTT{ + KeepAlive: defaultKeepAlive, + } }) } diff --git a/plugins/outputs/mqtt/mqtt_test.go b/plugins/outputs/mqtt/mqtt_test.go index 8affce1c93ddf..fd36d6d0577ac 100644 --- a/plugins/outputs/mqtt/mqtt_test.go +++ b/plugins/outputs/mqtt/mqtt_test.go @@ -19,6 +19,7 @@ func TestConnectAndWriteIntegration(t *testing.T) { m := &MQTT{ Servers: []string{url}, serializer: s, + KeepAlive: 30, } // Verify that we can connect to the MQTT broker From 49e50863901354fbc8c66e8f07920beb88bbd2ac Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Fri, 1 Oct 2021 15:09:50 -0600 Subject: [PATCH 41/81] fix: gitignore should ignore .toml/.conf files (#9818) As the application requires a config.toml or config.conf file it makes sense to ignore these types of files rather than having them show up in git status output. While the files are technically in the toml format, we use the .conf extension in our documentation so ignore both. 
--- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index 7c3fbd21c3535..614809d0681e1 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,5 @@ .DS_Store process.yml /.vscode +/*.toml +/*.conf From ac40bdc52e8ced5afc9605cea33b4fe32d998797 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Fri, 1 Oct 2021 15:10:25 -0600 Subject: [PATCH 42/81] fix: procstat missing tags in procstat_lookup metric (#9808) In #9488 the way that tags were built for procstat_lookup was changed and it was only including the pid_finder and result tags. This is not consistent with the documentation and is a regression from how they were previously constructed. Becuase of the large change to how procstat metrics are gathered, this will use one of the process metric's tags as a basis for the tags for procstat_lookup. Resolves: #9793 --- plugins/inputs/procstat/procstat.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index ce29a08460cca..7b2ffba26b430 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -154,9 +154,10 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error { } } + tags := make(map[string]string) p.procs = newProcs - for _, proc := range p.procs { + tags = proc.Tags() p.addMetric(proc, acc, now) } @@ -165,7 +166,7 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error { "running": len(p.procs), "result_code": 0, } - tags := make(map[string]string) + tags["pid_finder"] = p.PidFinder tags["result"] = "success" acc.AddFields("procstat_lookup", fields, tags, now) From 021dedb792cf2791a21d3bb80024dd67db7b875c Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Mon, 4 Oct 2021 10:05:56 -0400 Subject: [PATCH 43/81] fix: update toml tag to match sample config / readme (#9848) --- plugins/outputs/loki/loki.go | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/plugins/outputs/loki/loki.go b/plugins/outputs/loki/loki.go index 2f920ec829e3b..07d4d473bf396 100644 --- a/plugins/outputs/loki/loki.go +++ b/plugins/outputs/loki/loki.go @@ -57,7 +57,7 @@ type Loki struct { Timeout config.Duration `toml:"timeout"` Username string `toml:"username"` Password string `toml:"password"` - Headers map[string]string `toml:"headers"` + Headers map[string]string `toml:"http_headers"` ClientID string `toml:"client_id"` ClientSecret string `toml:"client_secret"` TokenURL string `toml:"token_url"` From 6c1bdfad76d8833f538f346a95ca5a5af88e9db9 Mon Sep 17 00:00:00 2001 From: "Guo Qiao (Joe)" Date: Tue, 5 Oct 2021 05:04:30 +1300 Subject: [PATCH 44/81] fix: logging in intel_rdt.go caused service stop timeout even as root (#9844) (#9850) --- plugins/inputs/intel_rdt/intel_rdt.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/intel_rdt/intel_rdt.go b/plugins/inputs/intel_rdt/intel_rdt.go index 89370062d730e..e0c7de526b067 100644 --- a/plugins/inputs/intel_rdt/intel_rdt.go +++ b/plugins/inputs/intel_rdt/intel_rdt.go @@ -278,12 +278,12 @@ func (r *IntelRDT) readData(ctx context.Context, args []string, processesPIDsAss }() err = cmd.Start() if err != nil { - r.errorChan <- fmt.Errorf("pqos: %v", err) + r.Log.Errorf("pqos: %v", err) return } err = cmd.Wait() if err != nil { - r.errorChan <- fmt.Errorf("pqos: %v", err) + r.Log.Errorf("pqos: %v", err) } } From c1f51b0645235e851f8c68e01b2e649dd7af5d22 Mon Sep 17 00:00:00 2001 From: Howard Yoo <32691630+howardyoo@users.noreply.github.com> Date: Mon, 4 Oct 2021 11:04:58 -0500 Subject: [PATCH 45/81] fix: mongodb input plugin issue #9845 (#9846) --- plugins/inputs/mongodb/mongostat.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index 41f735d389c7a..3871f6d252909 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ 
b/plugins/inputs/mongodb/mongostat.go @@ -1086,8 +1086,10 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec } if newStat.Metrics.Repl.Network != nil { returnVal.ReplNetworkBytes = newStat.Metrics.Repl.Network.Bytes - returnVal.ReplNetworkGetmoresNum = newStat.Metrics.Repl.Network.GetMores.Num - returnVal.ReplNetworkGetmoresTotalMillis = newStat.Metrics.Repl.Network.GetMores.TotalMillis + if newStat.Metrics.Repl.Network.GetMores != nil { + returnVal.ReplNetworkGetmoresNum = newStat.Metrics.Repl.Network.GetMores.Num + returnVal.ReplNetworkGetmoresTotalMillis = newStat.Metrics.Repl.Network.GetMores.TotalMillis + } returnVal.ReplNetworkOps = newStat.Metrics.Repl.Network.Ops } } From df5c19c17edac8cab07bbd5107f23603b1686a00 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Mon, 4 Oct 2021 11:19:06 -0700 Subject: [PATCH 46/81] feat (parser.json_v2): Support defining field/tag tables within an object table (#9449) --- config/config.go | 64 +++++--- go.mod | 2 +- go.sum | 4 +- plugins/parsers/json_v2/README.md | 80 ++++++--- plugins/parsers/json_v2/parser.go | 154 +++++++++++++----- plugins/parsers/json_v2/parser_test.go | 12 ++ .../multiple_arrays_in_object/expected.out | 1 - .../testdata/multiple_json_input/expected.out | 2 + .../testdata/multiple_json_input/input_1.json | 87 ++++++++++ .../testdata/multiple_json_input/input_2.json | 134 +++++++++++++++ .../multiple_json_input/telegraf.conf | 18 ++ .../subfieldtag_in_object/expected.out | 1 + .../testdata/subfieldtag_in_object/input.json | 97 +++++++++++ .../subfieldtag_in_object/telegraf.conf | 17 ++ .../subfieldtag_in_object_2/expected.out | 4 + .../subfieldtag_in_object_2/input.json | 10 ++ .../subfieldtag_in_object_2/telegraf.conf | 16 ++ 17 files changed, 617 insertions(+), 86 deletions(-) create mode 100644 plugins/parsers/json_v2/testdata/multiple_json_input/expected.out create mode 100644 
plugins/parsers/json_v2/testdata/multiple_json_input/input_1.json create mode 100644 plugins/parsers/json_v2/testdata/multiple_json_input/input_2.json create mode 100644 plugins/parsers/json_v2/testdata/multiple_json_input/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/subfieldtag_in_object/expected.out create mode 100644 plugins/parsers/json_v2/testdata/subfieldtag_in_object/input.json create mode 100644 plugins/parsers/json_v2/testdata/subfieldtag_in_object/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/expected.out create mode 100644 plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/input.json create mode 100644 plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/telegraf.conf diff --git a/config/config.go b/config/config.go index 4880da4832e5a..76aa494c4ca43 100644 --- a/config/config.go +++ b/config/config.go @@ -1421,28 +1421,8 @@ func (c *Config) getParserConfig(name string, tbl *ast.Table) (*parsers.Config, c.getFieldString(metricConfig, "timestamp_format", &mc.TimestampFormat) c.getFieldString(metricConfig, "timestamp_timezone", &mc.TimestampTimezone) - if fieldConfigs, ok := metricConfig.Fields["field"]; ok { - if fieldConfigs, ok := fieldConfigs.([]*ast.Table); ok { - for _, fieldconfig := range fieldConfigs { - var f json_v2.DataSet - c.getFieldString(fieldconfig, "path", &f.Path) - c.getFieldString(fieldconfig, "rename", &f.Rename) - c.getFieldString(fieldconfig, "type", &f.Type) - mc.Fields = append(mc.Fields, f) - } - } - } - if fieldConfigs, ok := metricConfig.Fields["tag"]; ok { - if fieldConfigs, ok := fieldConfigs.([]*ast.Table); ok { - for _, fieldconfig := range fieldConfigs { - var t json_v2.DataSet - c.getFieldString(fieldconfig, "path", &t.Path) - c.getFieldString(fieldconfig, "rename", &t.Rename) - t.Type = "string" - mc.Tags = append(mc.Tags, t) - } - } - } + mc.Fields = getFieldSubtable(c, metricConfig) + mc.Tags = getTagSubtable(c, metricConfig) if objectconfigs, 
ok := metricConfig.Fields["object"]; ok { if objectconfigs, ok := objectconfigs.([]*ast.Table); ok { @@ -1458,6 +1438,10 @@ func (c *Config) getParserConfig(name string, tbl *ast.Table) (*parsers.Config, c.getFieldStringSlice(objectConfig, "tags", &o.Tags) c.getFieldStringMap(objectConfig, "renames", &o.Renames) c.getFieldStringMap(objectConfig, "fields", &o.Fields) + + o.FieldPaths = getFieldSubtable(c, objectConfig) + o.TagPaths = getTagSubtable(c, objectConfig) + mc.JSONObjects = append(mc.JSONObjects, o) } } @@ -1477,6 +1461,42 @@ func (c *Config) getParserConfig(name string, tbl *ast.Table) (*parsers.Config, return pc, nil } +func getFieldSubtable(c *Config, metricConfig *ast.Table) []json_v2.DataSet { + var fields []json_v2.DataSet + + if fieldConfigs, ok := metricConfig.Fields["field"]; ok { + if fieldConfigs, ok := fieldConfigs.([]*ast.Table); ok { + for _, fieldconfig := range fieldConfigs { + var f json_v2.DataSet + c.getFieldString(fieldconfig, "path", &f.Path) + c.getFieldString(fieldconfig, "rename", &f.Rename) + c.getFieldString(fieldconfig, "type", &f.Type) + fields = append(fields, f) + } + } + } + + return fields +} + +func getTagSubtable(c *Config, metricConfig *ast.Table) []json_v2.DataSet { + var tags []json_v2.DataSet + + if fieldConfigs, ok := metricConfig.Fields["tag"]; ok { + if fieldConfigs, ok := fieldConfigs.([]*ast.Table); ok { + for _, fieldconfig := range fieldConfigs { + var t json_v2.DataSet + c.getFieldString(fieldconfig, "path", &t.Path) + c.getFieldString(fieldconfig, "rename", &t.Rename) + t.Type = "string" + tags = append(tags, t) + } + } + } + + return tags +} + // buildSerializer grabs the necessary entries from the ast.Table for creating // a serializers.Serializer object, and creates it, which can then be added onto // an Output object. 
diff --git a/go.mod b/go.mod index dc8b762d1e6d1..0999e764200ca 100644 --- a/go.mod +++ b/go.mod @@ -246,7 +246,7 @@ require ( github.com/stretchr/testify v1.7.0 github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 github.com/testcontainers/testcontainers-go v0.11.1 - github.com/tidwall/gjson v1.8.0 + github.com/tidwall/gjson v1.9.0 github.com/tidwall/match v1.0.3 // indirect github.com/tidwall/pretty v1.1.0 // indirect github.com/tinylib/msgp v1.1.6 diff --git a/go.sum b/go.sum index 4189b415723f0..6b60e06efb308 100644 --- a/go.sum +++ b/go.sum @@ -1535,8 +1535,8 @@ github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= github.com/testcontainers/testcontainers-go v0.11.1 h1:FiYsB83LSGbiawoV8TpAZGfcCUbtaeeg1SXqEKUxh08= github.com/testcontainers/testcontainers-go v0.11.1/go.mod h1:/V0UVq+1e7NWYoqTPog179clf0Qp9TOyp4EcXaEFQz8= -github.com/tidwall/gjson v1.8.0 h1:Qt+orfosKn0rbNTZqHYDqBrmm3UDA4KRkv70fDzG+PQ= -github.com/tidwall/gjson v1.8.0/go.mod h1:5/xDoumyyDNerp2U36lyolv46b3uF/9Bu6OfyQ9GImk= +github.com/tidwall/gjson v1.9.0 h1:+Od7AE26jAaMgVC31cQV/Ope5iKXulNMflrlB7k+F9E= +github.com/tidwall/gjson v1.9.0/go.mod h1:5/xDoumyyDNerp2U36lyolv46b3uF/9Bu6OfyQ9GImk= github.com/tidwall/match v1.0.3 h1:FQUVvBImDutD8wJLN6c5eMzWtjgONK9MwIBCOrUJKeE= github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= diff --git a/plugins/parsers/json_v2/README.md b/plugins/parsers/json_v2/README.md index a1effd5940614..d1e2e9c407255 100644 --- a/plugins/parsers/json_v2/README.md +++ b/plugins/parsers/json_v2/README.md @@ -1,10 +1,10 @@ # JSON Parser - Version 2 -This parser takes valid JSON input and turns it into metrics. 
The query syntax supported is [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md), you can go to this playground to test out your GJSON path here: https://gjson.dev/. You can find multiple examples under the `testdata` folder. +This parser takes valid JSON input and turns it into line protocol. The query syntax supported is [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md), you can go to this playground to test out your GJSON path here: https://gjson.dev/. You can find multiple examples under the `testdata` folder. ## Configuration -You configure this parser by describing the metric you want by defining the fields and tags from the input. The configuration is divided into config sub-tables called `field`, `tag`, and `object`. In the example below you can see all the possible configuration keys you can define for each config table. In the sections that follow these configuration keys are defined in more detail. +You configure this parser by describing the line protocol you want by defining the fields and tags from the input. The configuration is divided into config sub-tables called `field`, `tag`, and `object`. In the example below you can see all the possible configuration keys you can define for each config table. In the sections that follow these configuration keys are defined in more detail. 
**Example configuration:** @@ -19,27 +19,45 @@ You configure this parser by describing the metric you want by defining the fiel timestamp_format = "" # A string with a valid timestamp format (see below for possible values) timestamp_timezone = "" # A string with with a valid timezone (see below for possible values) [[inputs.file.json_v2.tag]] - path = "" # A string with valid GJSON path syntax + path = "" # A string with valid GJSON path syntax to a non-array/non-object value rename = "new name" # A string with a new name for the tag key [[inputs.file.json_v2.field]] - path = "" # A string with valid GJSON path syntax + path = "" # A string with valid GJSON path syntax to a non-array/non-object value rename = "new name" # A string with a new name for the tag key type = "int" # A string specifying the type (int,uint,float,string,bool) [[inputs.file.json_v2.object]] - path = "" # A string with valid GJSON path syntax + path = "" # A string with valid GJSON path syntax, can include array's and object's + + ## Configuration to define what JSON keys should be used as timestamps ## timestamp_key = "" # A JSON key (for a nested key, prepend the parent keys with underscores) to a valid timestamp timestamp_format = "" # A string with a valid timestamp format (see below for possible values) timestamp_timezone = "" # A string with with a valid timezone (see below for possible values) - disable_prepend_keys = false (or true, just not both) + + ### Configuration to define what JSON keys should be included and how (field/tag) ### + tags = [] # List of JSON keys (for a nested key, prepend the parent keys with underscores) to be a tag instead of a field, when adding a JSON key in this list you don't have to define it in the included_keys list included_keys = [] # List of JSON keys (for a nested key, prepend the parent keys with underscores) that should be only included in result excluded_keys = [] # List of JSON keys (for a nested key, prepend the parent keys with underscores) 
that shouldn't be included in result - tags = [] # List of JSON keys (for a nested key, prepend the parent keys with underscores) to be a tag instead of a field + # When a tag/field sub-table is defined, they will be the only field/tag's along with any keys defined in the included_keys list. + # If the resulting values aren't included in the object/array returned by the root object path, it won't be included. + # You can define as many tag/field sub-tables as you want. + [[inputs.file.json_v2.object.tag]] + path = "" # # A string with valid GJSON path syntax, can include array's and object's + rename = "new name" # A string with a new name for the tag key + [[inputs.file.json_v2.object.field]] + path = "" # # A string with valid GJSON path syntax, can include array's and object's + rename = "new name" # A string with a new name for the tag key + type = "int" # A string specifying the type (int,uint,float,string,bool) + + ### Configuration to modify the resutling line protocol ### + disable_prepend_keys = false (or true, just not both) [inputs.file.json_v2.object.renames] # A map of JSON keys (for a nested key, prepend the parent keys with underscores) with a new name for the tag key key = "new name" [inputs.file.json_v2.object.fields] # A map of JSON keys (for a nested key, prepend the parent keys with underscores) with a type (int,uint,float,string,bool) key = "int" ``` + --- + ### root config options * **measurement_name (OPTIONAL)**: Will set the measurement name to the provided string. @@ -56,7 +74,7 @@ such as `America/New_York`, to `Local` to utilize the system timezone, or to `UT ### `field` and `tag` config options -`field` and `tag` represent the elements of [line protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/), which is used to define a `metric`. You can use the `field` and `tag` config tables to gather a single value or an array of values that all share the same type and name. 
With this you can add a field or tag to a metric from data stored anywhere in your JSON. If you define the GJSON path to return a single value then you will get a single resutling metric that contains the field/tag. If you define the GJSON path to return an array of values, then each field/tag will be put into a separate metric (you use the # character to retrieve JSON arrays, find examples [here](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md#arrays)). +`field` and `tag` represent the elements of [line protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/). You can use the `field` and `tag` config tables to gather a single value or an array of values that all share the same type and name. With this you can add a field or tag to a line protocol from data stored anywhere in your JSON. If you define the GJSON path to return a single value then you will get a single resutling line protocol that contains the field/tag. If you define the GJSON path to return an array of values, then each field/tag will be put into a separate line protocol (you use the # character to retrieve JSON arrays, find examples [here](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md#arrays)). Note that objects are handled separately, therefore if you provide a path that returns a object it will be ignored. You will need use the `object` config table to parse objects, because `field` and `tag` doesn't handle relationships between data. Each `field` and `tag` you define is handled as a separate data point. @@ -70,26 +88,34 @@ The notable difference between `field` and `tag`, is that `tag` values will alwa #### **field** -* **path (REQUIRED)**: You must define the path query that gathers the object with [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md). +Using this field configuration you can gather a non-array/non-object values. 
Note this acts as a global field when used with the `object` configuration, if you gather an array of values using `object` then the field gathered will be added to each resulting line protocol without acknowledging its location in the original JSON. This is defined in TOML as an array table using double brackets. + +* **path (REQUIRED)**: A string with valid GJSON path syntax to a non-array/non-object value * **name (OPTIONAL)**: You can define a string value to set the field name. If not defined it will use the trailing word from the provided query. * **type (OPTIONAL)**: You can define a string value to set the desired type (float, int, uint, string, bool). If not defined it won't enforce a type and default to using the original type defined in the JSON (bool, float, or string). #### **tag** -* **path (REQUIRED)**: You must define the path query that gathers the object with [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md). +Using this tag configuration you can gather a non-array/non-object values. Note this acts as a global tag when used with the `object` configuration, if you gather an array of values using `object` then the tag gathered will be added to each resulting line protocol without acknowledging its location in the original JSON. This is defined in TOML as an array table using double brackets. + + +* **path (REQUIRED)**: A string with valid GJSON path syntax to a non-array/non-object value * **name (OPTIONAL)**: You can define a string value to set the field name. If not defined it will use the trailing word from the provided query. For good examples in using `field` and `tag` you can reference the following example configs: -* [fields_and_tags](testdata/fields_and_tags/telegraf.conf) --- + ### object -With the configuration section `object`, you can gather metrics from [JSON objects](https://www.w3schools.com/js/js_json_objects.asp). 
+With the configuration section `object`, you can gather values from [JSON objects](https://www.w3schools.com/js/js_json_objects.asp). This is defined in TOML as an array table using double brackets. -The following keys can be set for `object`: +#### The following keys can be set for `object` * **path (REQUIRED)**: You must define the path query that gathers the object with [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md) + +*Keys to define what JSON keys should be used as timestamps:* + * **timestamp_key(OPTIONAL)**: You can define a json key (for a nested key, prepend the parent keys with underscores) for the value to be set as the timestamp from the JSON input. * **timestamp_format (OPTIONAL, but REQUIRED when timestamp_query is defined**: Must be set to `unix`, `unix_ms`, `unix_us`, `unix_ns`, or the Go "reference time" which is defined to be the specific time: @@ -97,22 +123,30 @@ the Go "reference time" which is defined to be the specific time: * **timestamp_timezone (OPTIONAL, but REQUIRES timestamp_query**: This option should be set to a [Unix TZ value](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), such as `America/New_York`, to `Local` to utilize the system timezone, or to `UTC`. Defaults to `UTC` -* **disable_prepend_keys (OPTIONAL)**: Set to true to prevent resulting nested data to contain the parent key prepended to its key **NOTE**: duplicate names can overwrite each other when this is enabled -* **included_keys (OPTIONAL)**: You can define a list of key's that should be the only data included in the metric, by default it will include everything. 
-* **excluded_keys (OPTIONAL)**: You can define json keys to be excluded in the metric, for a nested key, prepend the parent keys with underscores + +*Configuration to define what JSON keys should be included and how (field/tag):* + +* **included_keys (OPTIONAL)**: You can define a list of key's that should be the only data included in the line protocol, by default it will include everything. +* **excluded_keys (OPTIONAL)**: You can define json keys to be excluded in the line protocol, for a nested key, prepend the parent keys with underscores * **tags (OPTIONAL)**: You can define json keys to be set as tags instead of fields, if you define a key that is an array or object then all nested values will become a tag -* **renames (OPTIONAL)**: A table matching the json key with the desired name (oppossed to defaulting to using the key), use names that include the prepended keys of its parent keys for nested results -* **fields (OPTIONAL)**: A table matching the json key with the desired type (int,string,bool,float), if you define a key that is an array or object then all nested values will become that type +* **field (OPTIONAL, defined in TOML as an array table using double brackets)**: Identical to the [field](#field) table you can define, but with two key differences. The path supports arrays and objects and is defined under the object table and therefore will adhere to how the JSON is structured. You want to use this if you want the field/tag to be added as it would if it were in the included_key list, but then use the GJSON path syntax. +* **tag (OPTIONAL, defined in TOML as an array table using double brackets)**: Identical to the [tag](#tag) table you can define, but with two key differences. The path supports arrays and objects and is defined under the object table and therefore will adhere to how the JSON is structured. You want to use this if you want the field/tag to be added as it would if it were in the included_key list, but then use the GJSON path syntax. 
+ +*Configuration to modify the resutling line protocol:* + +* **disable_prepend_keys (OPTIONAL)**: Set to true to prevent resulting nested data to contain the parent key prepended to its key **NOTE**: duplicate names can overwrite each other when this is enabled +* **renames (OPTIONAL, defined in TOML as a table using single bracket)**: A table matching the json key with the desired name (oppossed to defaulting to using the key), use names that include the prepended keys of its parent keys for nested results +* **fields (OPTIONAL, defined in TOML as a table using single bracket)**: A table matching the json key with the desired type (int,string,bool,float), if you define a key that is an array or object then all nested values will become that type ## Arrays and Objects The following describes the high-level approach when parsing arrays and objects: -**Array**: Every element in an array is treated as a *separate* metric +**Array**: Every element in an array is treated as a *separate* line protocol -**Object**: Every key/value in a object is treated as a *single* metric +**Object**: Every key/value in a object is treated as a *single* line protocol -When handling nested arrays and objects, these above rules continue to apply as the parser creates metrics. When an object has multiple array's as values, the array's will become separate metrics containing only non-array values from the obejct. Below you can see an example of this behavior, with an input json containing an array of book objects that has a nested array of characters. +When handling nested arrays and objects, these above rules continue to apply as the parser creates line protocol. When an object has multiple array's as values, the array's will become separate line protocol containing only non-array values from the obejct. Below you can see an example of this behavior, with an input json containing an array of book objects that has a nested array of characters. 
Example JSON: @@ -157,7 +191,7 @@ Example configuration: disable_prepend_keys = true ``` -Expected metrics: +Expected line protocol: ``` file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="A Long-expected Party" @@ -173,7 +207,7 @@ You can find more complicated examples under the folder `testdata`. ## Types -For each field you have the option to define the types for each metric. The following rules are in place for this configuration: +For each field you have the option to define the types. The following rules are in place for this configuration: * If a type is explicitly defined, the parser will enforce this type and convert the data to the defined type if possible. If the type can't be converted then the parser will fail. * If a type isn't defined, the parser will use the default type defined in the JSON (int, float, string) diff --git a/plugins/parsers/json_v2/parser.go b/plugins/parsers/json_v2/parser.go index fa0946621cde4..f4f84c562e781 100644 --- a/plugins/parsers/json_v2/parser.go +++ b/plugins/parsers/json_v2/parser.go @@ -13,6 +13,7 @@ import ( ) type Parser struct { + InputJSON []byte Configs []Config DefaultTags map[string]string Log telegraf.Logger @@ -20,8 +21,16 @@ type Parser struct { measurementName string - iterateObjects bool + iterateObjects bool + currentSettings JSONObject + pathResults []PathResult +} + +type PathResult struct { + result gjson.Result + tag bool + DataSet } type Config struct { @@ -53,21 +62,30 @@ type JSONObject struct { IncludedKeys []string `toml:"included_keys"` // OPTIONAL ExcludedKeys []string `toml:"excluded_keys"` // OPTIONAL DisablePrependKeys bool `toml:"disable_prepend_keys"` // OPTIONAL + FieldPaths []DataSet // OPTIONAL + TagPaths []DataSet // OPTIONAL } type MetricNode struct { + ParentIndex int OutputName string SetName string Tag bool DesiredType string // Can be "int", "uint", "float", "bool", "string" + /* + IncludeCollection is only used when processing objects and is responsible for containing 
the gjson results + found by the gjson paths provided in the FieldPaths and TagPaths configs. + */ + IncludeCollection *PathResult Metric telegraf.Metric gjson.Result } func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { + p.InputJSON = input // Only valid JSON is supported - if !gjson.Valid(string(input)) { + if !gjson.Valid(string(p.InputJSON)) { return nil, fmt.Errorf("Invalid JSON provided, unable to parse") } @@ -77,7 +95,7 @@ func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { // Measurement name configuration p.measurementName = c.MeasurementName if c.MeasurementNamePath != "" { - result := gjson.GetBytes(input, c.MeasurementNamePath) + result := gjson.GetBytes(p.InputJSON, c.MeasurementNamePath) if !result.IsArray() && !result.IsObject() { p.measurementName = result.String() } @@ -86,7 +104,7 @@ func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { // Timestamp configuration p.Timestamp = time.Now() if c.TimestampPath != "" { - result := gjson.GetBytes(input, c.TimestampPath) + result := gjson.GetBytes(p.InputJSON, c.TimestampPath) if !result.IsArray() && !result.IsObject() { if c.TimestampFormat == "" { err := fmt.Errorf("use of 'timestamp_query' requires 'timestamp_format'") @@ -101,17 +119,17 @@ func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { } } - fields, err := p.processMetric(c.Fields, input, false) + fields, err := p.processMetric(c.Fields, false) if err != nil { return nil, err } - tags, err := p.processMetric(c.Tags, input, true) + tags, err := p.processMetric(c.Tags, true) if err != nil { return nil, err } - objects, err := p.processObjects(c.JSONObjects, input) + objects, err := p.processObjects(c.JSONObjects) if err != nil { return nil, err } @@ -137,7 +155,7 @@ func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { // processMetric will iterate over all 'field' or 'tag' configs and create metrics for each // A field/tag can either be a single value or an array of values, each 
resulting in its own metric // For multiple configs, a set of metrics is created from the cartesian product of each separate config -func (p *Parser) processMetric(data []DataSet, input []byte, tag bool) ([]telegraf.Metric, error) { +func (p *Parser) processMetric(data []DataSet, tag bool) ([]telegraf.Metric, error) { if len(data) == 0 { return nil, nil } @@ -149,7 +167,7 @@ func (p *Parser) processMetric(data []DataSet, input []byte, tag bool) ([]telegr if c.Path == "" { return nil, fmt.Errorf("GJSON path is required") } - result := gjson.GetBytes(input, c.Path) + result := gjson.GetBytes(p.InputJSON, c.Path) if result.IsObject() { p.Log.Debugf("Found object in the path: %s, ignoring it please use 'object' to gather metrics from objects", c.Path) @@ -233,6 +251,9 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { p.Log.Debugf("Found object in query ignoring it please use 'object' to gather metrics from objects") return results, nil } + if result.IncludeCollection == nil && (len(p.currentSettings.FieldPaths) > 0 || len(p.currentSettings.TagPaths) > 0) { + result.IncludeCollection = p.existsInpathResults(result.Index, result.Raw) + } r, err := p.combineObject(result) if err != nil { return nil, err @@ -243,6 +264,9 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { if result.IsArray() { var err error + if result.IncludeCollection == nil && (len(p.currentSettings.FieldPaths) > 0 || len(p.currentSettings.TagPaths) > 0) { + result.IncludeCollection = p.existsInpathResults(result.Index, result.Raw) + } result.ForEach(func(_, val gjson.Result) bool { m := metric.New( p.measurementName, @@ -250,13 +274,14 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { map[string]interface{}{}, p.Timestamp, ) - if val.IsObject() { if p.iterateObjects { - n := MetricNode{ - SetName: result.SetName, - Metric: m, - Result: val, + n := result + n.ParentIndex += val.Index + n.Metric = m + n.Result = val + 
if n.IncludeCollection == nil && (len(p.currentSettings.FieldPaths) > 0 || len(p.currentSettings.TagPaths) > 0) { + n.IncludeCollection = p.existsInpathResults(n.Index, n.Raw) } r, err := p.combineObject(n) if err != nil { @@ -281,13 +306,12 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { for _, f := range result.Metric.TagList() { m.AddTag(f.Key, f.Value) } - n := MetricNode{ - Tag: result.Tag, - DesiredType: result.DesiredType, - OutputName: result.OutputName, - SetName: result.SetName, - Metric: m, - Result: val, + n := result + n.ParentIndex += val.Index + n.Metric = m + n.Result = val + if n.IncludeCollection == nil && (len(p.currentSettings.FieldPaths) > 0 || len(p.currentSettings.TagPaths) > 0) { + n.IncludeCollection = p.existsInpathResults(n.Index, n.Raw) } r, err := p.expandArray(n) if err != nil { @@ -314,17 +338,43 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { switch result.Value().(type) { case nil: // Ignore JSON values that are set as null default: + outputName := result.OutputName + desiredType := result.DesiredType + + if len(p.currentSettings.FieldPaths) > 0 || len(p.currentSettings.TagPaths) > 0 { + var pathResult *PathResult + // When IncludeCollection isn't nil, that means the current result is included in the collection. 
+ if result.IncludeCollection != nil { + pathResult = result.IncludeCollection + } else { + // Verify that the result should be included based on the results of fieldpaths and tag paths + pathResult = p.existsInpathResults(result.ParentIndex, result.Raw) + } + if pathResult == nil { + return results, nil + } + if pathResult.tag { + result.Tag = true + } + if !pathResult.tag { + desiredType = pathResult.Type + } + if pathResult.Rename != "" { + outputName = pathResult.Rename + } + } + if result.Tag { - result.DesiredType = "string" + desiredType = "string" } - v, err := p.convertType(result.Result, result.DesiredType, result.SetName) + v, err := p.convertType(result.Result, desiredType, result.SetName) if err != nil { return nil, err } if result.Tag { - result.Metric.AddTag(result.OutputName, v.(string)) + result.Metric.AddTag(outputName, v.(string)) } else { - result.Metric.AddField(result.OutputName, v) + result.Metric.AddField(outputName, v) } } } @@ -335,22 +385,55 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { return results, nil } +func (p *Parser) existsInpathResults(index int, raw string) *PathResult { + for _, f := range p.pathResults { + if f.result.Index == 0 { + for _, i := range f.result.Indexes { + if i == index { + return &f + } + } + } else if f.result.Index == index { + return &f + } + } + return nil +} + // processObjects will iterate over all 'object' configs and create metrics for each -func (p *Parser) processObjects(objects []JSONObject, input []byte) ([]telegraf.Metric, error) { +func (p *Parser) processObjects(objects []JSONObject) ([]telegraf.Metric, error) { p.iterateObjects = true var t []telegraf.Metric for _, c := range objects { p.currentSettings = c + if c.Path == "" { return nil, fmt.Errorf("GJSON path is required") } - result := gjson.GetBytes(input, c.Path) + result := gjson.GetBytes(p.InputJSON, c.Path) + + scopedJSON := []byte(result.Raw) + for _, f := range c.FieldPaths { + var r PathResult + 
r.result = gjson.GetBytes(scopedJSON, f.Path) + r.DataSet = f + p.pathResults = append(p.pathResults, r) + } + + for _, f := range c.TagPaths { + var r PathResult + r.result = gjson.GetBytes(scopedJSON, f.Path) + r.DataSet = f + r.tag = true + p.pathResults = append(p.pathResults, r) + } if result.Type == gjson.Null { return nil, fmt.Errorf("GJSON Path returned null") } rootObject := MetricNode{ + ParentIndex: 0, Metric: metric.New( p.measurementName, map[string]string{}, @@ -401,14 +484,11 @@ func (p *Parser) combineObject(result MetricNode) ([]telegraf.Metric, error) { } } - arrayNode := MetricNode{ - DesiredType: result.DesiredType, - Tag: result.Tag, - OutputName: outputName, - SetName: setName, - Metric: result.Metric, - Result: val, - } + arrayNode := result + arrayNode.ParentIndex += val.Index + arrayNode.OutputName = outputName + arrayNode.SetName = setName + arrayNode.Result = val for k, t := range p.currentSettings.Fields { if setName == k { @@ -455,8 +535,8 @@ func (p *Parser) isIncluded(key string, val gjson.Result) bool { return true } // automatically adds tags to included_keys so it does NOT have to be repeated in the config - p.currentSettings.IncludedKeys = append(p.currentSettings.IncludedKeys, p.currentSettings.Tags...) - for _, i := range p.currentSettings.IncludedKeys { + allKeys := append(p.currentSettings.IncludedKeys, p.currentSettings.Tags...) 
+ for _, i := range allKeys { if i == key { return true } diff --git a/plugins/parsers/json_v2/parser_test.go b/plugins/parsers/json_v2/parser_test.go index 7b34b83c0af8a..3ef08856190ac 100644 --- a/plugins/parsers/json_v2/parser_test.go +++ b/plugins/parsers/json_v2/parser_test.go @@ -28,6 +28,18 @@ func TestData(t *testing.T) { name: "Test having an array of objects", test: "array_of_objects", }, + { + name: "Test having multiple JSON inputs", + test: "multiple_json_input", + }, + { + name: "A second test when selecting with sub field and tags", + test: "subfieldtag_in_object_2", + }, + { + name: "Test selecting with sub field and tags", + test: "subfieldtag_in_object", + }, { name: "Test using just fields and tags", test: "fields_and_tags", diff --git a/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/expected.out b/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/expected.out index 2948da1720f64..04cd0635a5497 100644 --- a/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/expected.out +++ b/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/expected.out @@ -6,4 +6,3 @@ file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="The Shadow of th file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="The Shadow of the Past",name="Bilbo",species="hobbit",random=2 file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="The Shadow of the Past",name="Frodo",species="hobbit",random=1 file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="The Shadow of the Past",name="Frodo",species="hobbit",random=2 - diff --git a/plugins/parsers/json_v2/testdata/multiple_json_input/expected.out b/plugins/parsers/json_v2/testdata/multiple_json_input/expected.out new file mode 100644 index 0000000000000..f3fa9f0d8571c --- /dev/null +++ b/plugins/parsers/json_v2/testdata/multiple_json_input/expected.out @@ -0,0 +1,2 @@ +file,from_station=COLM,to_station=ANTC,etd_estimate_direction=North minutes=2i 
+file,from_station=POWL,to_station=DALY,etd_estimate_direction=South minutes=6i diff --git a/plugins/parsers/json_v2/testdata/multiple_json_input/input_1.json b/plugins/parsers/json_v2/testdata/multiple_json_input/input_1.json new file mode 100644 index 0000000000000..f60cd59f91247 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/multiple_json_input/input_1.json @@ -0,0 +1,87 @@ +{ + "?xml": { + "@version": "1.0", + "@encoding": "utf-8" + }, + "root": { + "@id": "1", + "uri": { + "#cdata-section": "http://api.bart.gov/api/etd.aspx?cmd=etd&orig=COLM&dir=n&json=y" + }, + "date": "07/02/2021", + "time": "06:05:47 PM PDT", + "station": [ + { + "name": "Colma", + "abbr": "COLM", + "etd": [ + { + "destination": "Antioch", + "abbreviation": "ANTC", + "limited": "0", + "estimate": [ + { + "minutes": "2", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "16", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "31", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + } + ] + }, + { + "destination": "Richmond", + "abbreviation": "RICH", + "limited": "0", + "estimate": [ + { + "minutes": "22", + "platform": "2", + "direction": "North", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "52", + "platform": "2", + "direction": "North", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + } + ] + } + ] + } + ], + "message": "" + } +} diff --git a/plugins/parsers/json_v2/testdata/multiple_json_input/input_2.json b/plugins/parsers/json_v2/testdata/multiple_json_input/input_2.json new file mode 100644 index 0000000000000..e75e84a093b37 --- /dev/null +++ 
b/plugins/parsers/json_v2/testdata/multiple_json_input/input_2.json @@ -0,0 +1,134 @@ +{ + "?xml": { + "@version": "1.0", + "@encoding": "utf-8" + }, + "root": { + "@id": "1", + "uri": { + "#cdata-section": "http://api.bart.gov/api/etd.aspx?cmd=etd&orig=POWL&dir=s&json=y" + }, + "date": "07/02/2021", + "time": "06:06:01 PM PDT", + "station": [ + { + "name": "Powell St.", + "abbr": "POWL", + "etd": [ + { + "destination": "Daly City", + "abbreviation": "DALY", + "limited": "0", + "estimate": [ + { + "minutes": "6", + "platform": "1", + "direction": "South", + "length": "10", + "color": "GREEN", + "hexcolor": "#339933", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "26", + "platform": "1", + "direction": "South", + "length": "9", + "color": "BLUE", + "hexcolor": "#0099cc", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "36", + "platform": "1", + "direction": "South", + "length": "10", + "color": "GREEN", + "hexcolor": "#339933", + "bikeflag": "1", + "delay": "0" + } + ] + }, + { + "destination": "Millbrae", + "abbreviation": "MLBR", + "limited": "0", + "estimate": [ + { + "minutes": "19", + "platform": "1", + "direction": "South", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "49", + "platform": "1", + "direction": "South", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "79", + "platform": "1", + "direction": "South", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + } + ] + }, + { + "destination": "SF Airport", + "abbreviation": "SFIA", + "limited": "0", + "estimate": [ + { + "minutes": "7", + "platform": "1", + "direction": "South", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "37", + "platform": "1", + "direction": "South", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + 
"bikeflag": "1", + "delay": "0" + }, + { + "minutes": "67", + "platform": "1", + "direction": "South", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + } + ] + } + ] + } + ], + "message": "" + } +} diff --git a/plugins/parsers/json_v2/testdata/multiple_json_input/telegraf.conf b/plugins/parsers/json_v2/testdata/multiple_json_input/telegraf.conf new file mode 100644 index 0000000000000..96c8ede181a54 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/multiple_json_input/telegraf.conf @@ -0,0 +1,18 @@ +[[inputs.file]] + files = ["./testdata/multiple_json_input/input_1.json", "./testdata/multiple_json_input/input_2.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "root.station" + [[inputs.file.json_v2.object.tag]] + path="#.abbr" + rename = "from_station" + [[inputs.file.json_v2.object.field]] + path = "#.etd.0.estimate.0.minutes" + rename = "minutes" + type = "int" + [[inputs.file.json_v2.object.tag]] + path = "#.etd.0.abbreviation" + rename = "to_station" + [[inputs.file.json_v2.object.tag]] + path = "#.etd.0.estimate.0.direction" diff --git a/plugins/parsers/json_v2/testdata/subfieldtag_in_object/expected.out b/plugins/parsers/json_v2/testdata/subfieldtag_in_object/expected.out new file mode 100644 index 0000000000000..a7db83863a63c --- /dev/null +++ b/plugins/parsers/json_v2/testdata/subfieldtag_in_object/expected.out @@ -0,0 +1 @@ +file,from_station=COLM,to_station=ANTC,etd_estimate_direction=North etd_estimate_minutes=6i diff --git a/plugins/parsers/json_v2/testdata/subfieldtag_in_object/input.json b/plugins/parsers/json_v2/testdata/subfieldtag_in_object/input.json new file mode 100644 index 0000000000000..45d0d5514ae76 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/subfieldtag_in_object/input.json @@ -0,0 +1,97 @@ +{ + "?xml": { + "@version": "1.0", + "@encoding": "utf-8" + }, + "root": { + "@id": "1", + "uri": { + "#cdata-section": 
"http://api.bart.gov/api/etd.aspx?cmd=etd&orig=COLM&dir=n&json=y" + }, + "date": "06/25/2021", + "time": "05:01:31 PM PDT", + "station": [ + { + "name": "Colma", + "abbr": "COLM", + "etd": [ + { + "destination": "Antioch", + "abbreviation": "ANTC", + "limited": "0", + "estimate": [ + { + "minutes": "6", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "36", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "51", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + } + ] + }, + { + "destination": "Richmond", + "abbreviation": "RICH", + "limited": "0", + "estimate": [ + { + "minutes": "12", + "platform": "2", + "direction": "North", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "26", + "platform": "2", + "direction": "North", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "41", + "platform": "2", + "direction": "North", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + } + ] + } + ] + } + ], + "message": "" + } +} diff --git a/plugins/parsers/json_v2/testdata/subfieldtag_in_object/telegraf.conf b/plugins/parsers/json_v2/testdata/subfieldtag_in_object/telegraf.conf new file mode 100644 index 0000000000000..7a8a283d77c3d --- /dev/null +++ b/plugins/parsers/json_v2/testdata/subfieldtag_in_object/telegraf.conf @@ -0,0 +1,17 @@ +[[inputs.file]] + files = ["./testdata/subfieldtag_in_object/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "root.station" + [[inputs.file.json_v2.object.field]] + path = 
"#.etd.0.estimate.0.minutes" + type = "int" + [[inputs.file.json_v2.object.tag]] + path = "#.abbr" + rename = "from_station" + [[inputs.file.json_v2.object.tag]] + path = "#.etd.0.abbreviation" + rename = "to_station" + [[inputs.file.json_v2.object.tag]] + path = "#.etd.0.estimate.0.direction" diff --git a/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/expected.out b/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/expected.out new file mode 100644 index 0000000000000..89748967a1ee9 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/expected.out @@ -0,0 +1,4 @@ +file,data=3 cnt=23i,format=0i +file,data=7 cnt=23i,format=0i +file,data=10 cnt=23i,format=0i +file,data=23 cnt=23i,format=0i diff --git a/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/input.json b/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/input.json new file mode 100644 index 0000000000000..62b768eae05a7 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/input.json @@ -0,0 +1,10 @@ +{ + "cnt": 23, + "data": [ + 3, + 7, + 10, + 23 + ], + "format": 0 +} diff --git a/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/telegraf.conf b/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/telegraf.conf new file mode 100644 index 0000000000000..60d7d18da43d0 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/telegraf.conf @@ -0,0 +1,16 @@ +# Example taken from: https://github.com/influxdata/telegraf/issues/5940 + +[[inputs.file]] + files = ["./testdata/subfieldtag_in_object_2/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "@this" + [[inputs.file.json_v2.object.tag]] + path = "data" + [[inputs.file.json_v2.object.field]] + path = "cnt" + type = "int" + [[inputs.file.json_v2.object.field]] + path = "format" + type = "int" From 68333d70f02d5ad89eac0dce290c1ad8b3917ffd Mon Sep 17 00:00:00 2001 From: Helen Weller 
<38860767+helenosheaa@users.noreply.github.com> Date: Mon, 4 Oct 2021 17:30:59 -0400 Subject: [PATCH 47/81] fix: remove eg fix: which breaks label bot functionality (#9859) --- .github/PULL_REQUEST_TEMPLATE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 1c717ddbb1a15..67b65a26247fb 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -5,7 +5,7 @@ show completion. --> - [ ] Updated associated README.md. - [ ] Wrote appropriate unit tests. -- [ ] Pull request title or commits are in [conventional commit format](https://www.conventionalcommits.org/en/v1.0.0/#summary) (e.g. feat: or fix:) +- [ ] Pull request title or commits are in [conventional commit format](https://www.conventionalcommits.org/en/v1.0.0/#summary) setting alias: {} name: {} id: {}'".format(alias, name, id)) + state["aliases"][id] = name + if "value" in metric.fields: + buildTopicTags(metric, topicFields) + buildNameTags(metric, name) + else: + output = None + + # Try to resolve the unresolved if any + if len(state["unresolved"]) > 0: + # Filter out the matching metrics and keep the rest as unresolved + log.debug(" unresolved") + unresolved = [("{}/{}".format(edgeid, m.fields["alias"]), m) for m in state["unresolved"]] + matching = [(mid, m) for mid, m in unresolved if mid == id] + state["unresolved"] = [m for mid, m in unresolved if mid != id] + + log.debug(" found {} matching unresolved metrics".format(len(matching))) + # Process the matching metrics and output - TODO - needs debugging + # for mid, m in matching: + # buildTopicTags(m,topicFields) + # buildNameTags(m) + # output = [m for _, m in matching] + [metric] + + elif DATA_TAG in topic: + log.debug(" metric msg_type: {} edgeid: {} topic: {}".format(DATA_TAG, edgeid, topic)) + if "alias" in metric.fields: + alias = metric.fields.get("alias") + + # Lookup the ID. 
If we know it, replace the name of the metric with the lookup value, + # otherwise we need to keep the metric for resolving later. + # This can happen if the messages are out-of-order for some reason... + id = "{}/{}".format(edgeid,alias) + if id in state["aliases"]: + name = state["aliases"][id] + log.debug(" found alias: {} name: {}".format(alias, name)) + buildTopicTags(metric,topicFields) + buildNameTags(metric,name) + else: + # We want to hold the metric until we get the corresponding birth message + log.debug(" id not found: {}".format(id)) + output = None + if len(state["unresolved"]) >= MAX_UNRESOLVED: + log.warn(" metric overflow, trimming {}".format(len(state["unresolved"]) - MAX_UNRESOLVED+1)) + # Release the unresolved metrics as raw and trim buffer + output = state["unresolved"][MAX_UNRESOLVED-1:] + state["unresolved"] = state["unresolved"][:MAX_UNRESOLVED-1] + log.debug(" --> keeping metric") + state["unresolved"].append(metric) + else: + output = None + + return output + From 014161cd0c2c75ae96c189f8227e9a94c6abc358 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Tue, 5 Oct 2021 15:06:53 -0600 Subject: [PATCH 49/81] feat: add custom time/date format field for elasticsearch_query (#9838) --- plugins/inputs/elasticsearch_query/README.md | 8 ++++++++ .../elasticsearch_query/aggregation_query.go | 2 +- .../elasticsearch_query/elasticsearch_query.go | 8 ++++++++ .../elasticsearch_query_test.go | 17 +++++++++++++++++ 4 files changed, 34 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/elasticsearch_query/README.md b/plugins/inputs/elasticsearch_query/README.md index 881cb6609b5b0..5e90d19e72f21 100755 --- a/plugins/inputs/elasticsearch_query/README.md +++ b/plugins/inputs/elasticsearch_query/README.md @@ -54,6 +54,13 @@ Currently it is known to break on 7.x or greater versions. ## The date/time field in the Elasticsearch index (mandatory). 
date_field = "@timestamp" + ## If the field used for the date/time field in Elasticsearch is also using + ## a custom date/time format it may be required to provide the format to + ## correctly parse the field. + ## + ## If using one of the built in elasticsearch formats this is not required. + # date_field_custom_format = "" + ## Time window to query (eg. "1m" to query documents from last minute). ## Normally should be set to same as collection interval query_period = "1m" @@ -150,6 +157,7 @@ Please note that the `[[inputs.elasticsearch_query]]` is still required for all ### Optional parameters +- `date_field_custom_format`: Not needed if using one of the built in date/time formats of Elasticsearch, but may be required if using a custom date/time format. The format syntax uses the [Joda date format](https://www.elastic.co/guide/en/elasticsearch/reference/6.8/search-aggregations-bucket-daterange-aggregation.html#date-format-pattern). - `filter_query`: Lucene query to filter the results (default: "\*") - `metric_fields`: The list of fields to perform metric aggregation (these must be indexed as numeric fields) - `metric_funcion`: The single-value metric aggregation function to be performed on the `metric_fields` defined. Currently supported aggregations are "avg", "min", "max", "sum". 
(see [https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics.html](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics.html) diff --git a/plugins/inputs/elasticsearch_query/aggregation_query.go b/plugins/inputs/elasticsearch_query/aggregation_query.go index 51bdd98e7130b..aff67d2baa884 100644 --- a/plugins/inputs/elasticsearch_query/aggregation_query.go +++ b/plugins/inputs/elasticsearch_query/aggregation_query.go @@ -33,7 +33,7 @@ func (e *ElasticsearchQuery) runAggregationQuery(ctx context.Context, aggregatio query := elastic5.NewBoolQuery() query = query.Filter(elastic5.NewQueryStringQuery(filterQuery)) - query = query.Filter(elastic5.NewRangeQuery(aggregation.DateField).From(from).To(now)) + query = query.Filter(elastic5.NewRangeQuery(aggregation.DateField).From(from).To(now).Format(aggregation.DateFieldFormat)) src, err := query.Source() if err != nil { diff --git a/plugins/inputs/elasticsearch_query/elasticsearch_query.go b/plugins/inputs/elasticsearch_query/elasticsearch_query.go index 3c04f952b5bee..009577573a4f3 100644 --- a/plugins/inputs/elasticsearch_query/elasticsearch_query.go +++ b/plugins/inputs/elasticsearch_query/elasticsearch_query.go @@ -55,6 +55,13 @@ const sampleConfig = ` ## The date/time field in the Elasticsearch index (mandatory). date_field = "@timestamp" + ## If the field used for the date/time field in Elasticsearch is also using + ## a custom date/time format it may be required to provide the format to + ## correctly parse the field. + ## + ## If using one of the built in elasticsearch formats this is not required. + # date_field_custom_format = "" + ## Time window to query (eg. "1m" to query documents from last minute). 
## Normally should be set to same as collection interval query_period = "1m" @@ -104,6 +111,7 @@ type esAggregation struct { Index string `toml:"index"` MeasurementName string `toml:"measurement_name"` DateField string `toml:"date_field"` + DateFieldFormat string `toml:"date_field_custom_format"` QueryPeriod config.Duration `toml:"query_period"` FilterQuery string `toml:"filter_query"` MetricFields []string `toml:"metric_fields"` diff --git a/plugins/inputs/elasticsearch_query/elasticsearch_query_test.go b/plugins/inputs/elasticsearch_query/elasticsearch_query_test.go index 6a89dc8eea617..e017681b7c58d 100644 --- a/plugins/inputs/elasticsearch_query/elasticsearch_query_test.go +++ b/plugins/inputs/elasticsearch_query/elasticsearch_query_test.go @@ -484,6 +484,23 @@ var testEsAggregationData = []esAggregationQueryTest{ false, false, }, + { + "query 14 - non-existing custom date/time format", + esAggregation{ + Index: testindex, + MeasurementName: "measurement14", + DateField: "@timestamp", + DateFieldFormat: "yyyy", + QueryPeriod: queryPeriod, + Tags: []string{}, + mapMetricFields: map[string]string{}, + }, + nil, + nil, + false, + false, + true, + }, } func setupIntegrationTest() error { From d2a25456d58cb269fd57e0204a52b32b309c5d4a Mon Sep 17 00:00:00 2001 From: Yuji Kawamoto Date: Wed, 6 Oct 2021 06:11:46 +0900 Subject: [PATCH 50/81] feat(prometheus): add ignore_timestamp option (#9740) --- config/config.go | 2 +- etc/telegraf.conf | 4 ++ plugins/inputs/prometheus/README.md | 4 ++ plugins/inputs/prometheus/parser.go | 4 +- plugins/inputs/prometheus/parser_test.go | 45 ++++++++++++++++++-- plugins/inputs/prometheus/prometheus.go | 13 +++++- plugins/inputs/prometheus/prometheus_test.go | 23 ++++++++++ plugins/parsers/prometheus/parser.go | 21 +++++---- plugins/parsers/prometheus/parser_test.go | 28 +++++++++++- plugins/parsers/registry.go | 13 ++++-- 10 files changed, 133 insertions(+), 24 deletions(-) diff --git a/config/config.go b/config/config.go index 
76aa494c4ca43..d6081aedcfaf3 100644 --- a/config/config.go +++ b/config/config.go @@ -1593,7 +1593,7 @@ func (c *Config) missingTomlField(_ reflect.Type, key string) error { "json_string_fields", "json_time_format", "json_time_key", "json_timestamp_format", "json_timestamp_units", "json_timezone", "json_v2", "lvm", "metric_batch_size", "metric_buffer_limit", "name_override", "name_prefix", "name_suffix", "namedrop", "namepass", "order", "pass", "period", "precision", - "prefix", "prometheus_export_timestamp", "prometheus_sort_metrics", "prometheus_string_as_label", + "prefix", "prometheus_export_timestamp", "prometheus_ignore_timestamp", "prometheus_sort_metrics", "prometheus_string_as_label", "separator", "splunkmetric_hec_routing", "splunkmetric_multimetric", "tag_keys", "tagdrop", "tagexclude", "taginclude", "tagpass", "tags", "template", "templates", "value_field_name", "wavefront_source_override", "wavefront_use_strict", diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 2892d99142be5..12672da45cf7d 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -8305,6 +8305,10 @@ # ## Url tag name (tag containing scrapped url. optional, default is "url") # # url_tag = "url" # +# ## Whether the timestamp of the scraped metrics will be ignored. +# ## If set to true, the gather time will be used. +# # ignore_timestamp = false +# # ## An array of Kubernetes services to scrape metrics from. # # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] # diff --git a/plugins/inputs/prometheus/README.md b/plugins/inputs/prometheus/README.md index 955c6ab7d978b..fe6d3a8e816da 100644 --- a/plugins/inputs/prometheus/README.md +++ b/plugins/inputs/prometheus/README.md @@ -23,6 +23,10 @@ in Prometheus format. ## Url tag name (tag containing scrapped url. optional, default is "url") # url_tag = "url" + ## Whether the timestamp of the scraped metrics will be ignored. + ## If set to true, the gather time will be used. 
+ # ignore_timestamp = false + ## An array of Kubernetes services to scrape metrics from. # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] diff --git a/plugins/inputs/prometheus/parser.go b/plugins/inputs/prometheus/parser.go index 7d3140dc7d627..dfe5cc4749813 100644 --- a/plugins/inputs/prometheus/parser.go +++ b/plugins/inputs/prometheus/parser.go @@ -19,7 +19,7 @@ import ( "github.com/prometheus/common/expfmt" ) -func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) { +func Parse(buf []byte, header http.Header, ignoreTimestamp bool) ([]telegraf.Metric, error) { var parser expfmt.TextParser var metrics []telegraf.Metric var err error @@ -76,7 +76,7 @@ func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) { // converting to telegraf metric if len(fields) > 0 { var t time.Time - if m.TimestampMs != nil && *m.TimestampMs > 0 { + if !ignoreTimestamp && m.TimestampMs != nil && *m.TimestampMs > 0 { t = time.Unix(0, *m.TimestampMs*1000000) } else { t = now diff --git a/plugins/inputs/prometheus/parser_test.go b/plugins/inputs/prometheus/parser_test.go index 293e1968d2b5d..ffd5967458c9f 100644 --- a/plugins/inputs/prometheus/parser_test.go +++ b/plugins/inputs/prometheus/parser_test.go @@ -1,8 +1,10 @@ package prometheus import ( + "fmt" "net/http" "testing" + "time" "github.com/stretchr/testify/assert" ) @@ -42,7 +44,7 @@ apiserver_request_latencies_count{resource="bindings",verb="POST"} 2025 func TestParseValidPrometheus(t *testing.T) { // Gauge value - metrics, err := Parse([]byte(validUniqueGauge), http.Header{}) + metrics, err := Parse([]byte(validUniqueGauge), http.Header{}, false) assert.NoError(t, err) assert.Len(t, metrics, 1) assert.Equal(t, "cadvisor_version_info", metrics[0].Name()) @@ -58,7 +60,7 @@ func TestParseValidPrometheus(t *testing.T) { }, metrics[0].Tags()) // Counter value - metrics, err = Parse([]byte(validUniqueCounter), http.Header{}) + metrics, err = Parse([]byte(validUniqueCounter), 
http.Header{}, false) assert.NoError(t, err) assert.Len(t, metrics, 1) assert.Equal(t, "get_token_fail_count", metrics[0].Name()) @@ -69,7 +71,7 @@ func TestParseValidPrometheus(t *testing.T) { // Summary data //SetDefaultTags(map[string]string{}) - metrics, err = Parse([]byte(validUniqueSummary), http.Header{}) + metrics, err = Parse([]byte(validUniqueSummary), http.Header{}, false) assert.NoError(t, err) assert.Len(t, metrics, 1) assert.Equal(t, "http_request_duration_microseconds", metrics[0].Name()) @@ -83,7 +85,7 @@ func TestParseValidPrometheus(t *testing.T) { assert.Equal(t, map[string]string{"handler": "prometheus"}, metrics[0].Tags()) // histogram data - metrics, err = Parse([]byte(validUniqueHistogram), http.Header{}) + metrics, err = Parse([]byte(validUniqueHistogram), http.Header{}, false) assert.NoError(t, err) assert.Len(t, metrics, 1) assert.Equal(t, "apiserver_request_latencies", metrics[0].Name()) @@ -103,3 +105,38 @@ func TestParseValidPrometheus(t *testing.T) { map[string]string{"verb": "POST", "resource": "bindings"}, metrics[0].Tags()) } + +func TestMetricsWithTimestamp(t *testing.T) { + testTime := time.Date(2020, time.October, 4, 17, 0, 0, 0, time.UTC) + testTimeUnix := testTime.UnixNano() / int64(time.Millisecond) + metricsWithTimestamps := fmt.Sprintf(` +# TYPE test_counter counter +test_counter{label="test"} 1 %d +`, testTimeUnix) + + // IgnoreTimestamp is false + metrics, err := Parse([]byte(metricsWithTimestamps), http.Header{}, false) + assert.NoError(t, err) + assert.Len(t, metrics, 1) + assert.Equal(t, "test_counter", metrics[0].Name()) + assert.Equal(t, map[string]interface{}{ + "counter": float64(1), + }, metrics[0].Fields()) + assert.Equal(t, map[string]string{ + "label": "test", + }, metrics[0].Tags()) + assert.Equal(t, testTime, metrics[0].Time().UTC()) + + // IgnoreTimestamp is true + metrics, err = Parse([]byte(metricsWithTimestamps), http.Header{}, true) + assert.NoError(t, err) + assert.Len(t, metrics, 1) + assert.Equal(t, 
"test_counter", metrics[0].Name()) + assert.Equal(t, map[string]interface{}{ + "counter": float64(1), + }, metrics[0].Fields()) + assert.Equal(t, map[string]string{ + "label": "test", + }, metrics[0].Tags()) + assert.WithinDuration(t, time.Now(), metrics[0].Time().UTC(), 5*time.Second) +} diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 136e8ae0f6d9d..18cbf6c8b3d59 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -58,6 +58,8 @@ type Prometheus struct { URLTag string `toml:"url_tag"` + IgnoreTimestamp bool `toml:"ignore_timestamp"` + tls.ClientConfig Log telegraf.Logger @@ -101,6 +103,10 @@ var sampleConfig = ` ## Url tag name (tag containing scrapped url. optional, default is "url") # url_tag = "url" + ## Whether the timestamp of the scraped metrics will be ignored. + ## If set to true, the gather time will be used. + # ignore_timestamp = false + ## An array of Kubernetes services to scrape metrics from. 
# kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] @@ -414,10 +420,13 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error } if p.MetricVersion == 2 { - parser := parser_v2.Parser{Header: resp.Header} + parser := parser_v2.Parser{ + Header: resp.Header, + IgnoreTimestamp: p.IgnoreTimestamp, + } metrics, err = parser.Parse(body) } else { - metrics, err = Parse(body, resp.Header) + metrics, err = Parse(body, resp.Header, p.IgnoreTimestamp) } if err != nil { diff --git a/plugins/inputs/prometheus/prometheus_test.go b/plugins/inputs/prometheus/prometheus_test.go index ea8ca0e9346ab..11117e05b45d9 100644 --- a/plugins/inputs/prometheus/prometheus_test.go +++ b/plugins/inputs/prometheus/prometheus_test.go @@ -242,6 +242,29 @@ func TestPrometheusGeneratesGaugeMetricsV2(t *testing.T) { assert.True(t, acc.HasTimestamp("prometheus", time.Unix(1490802350, 0))) } +func TestPrometheusGeneratesMetricsWithIgnoreTimestamp(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, err := fmt.Fprintln(w, sampleTextFormat) + require.NoError(t, err) + })) + defer ts.Close() + + p := &Prometheus{ + Log: testutil.Logger{}, + URLs: []string{ts.URL}, + URLTag: "url", + IgnoreTimestamp: true, + } + + var acc testutil.Accumulator + + err := acc.GatherError(p.Gather) + require.NoError(t, err) + + m, _ := acc.Get("test_metric") + assert.WithinDuration(t, time.Now(), m.Time, 5*time.Second) +} + func TestUnsupportedFieldSelector(t *testing.T) { fieldSelectorString := "spec.containerName=container" prom := &Prometheus{Log: testutil.Logger{}, KubernetesFieldSelector: fieldSelectorString} diff --git a/plugins/parsers/prometheus/parser.go b/plugins/parsers/prometheus/parser.go index e55789f7957b4..bc7ea0c636e4d 100644 --- a/plugins/parsers/prometheus/parser.go +++ b/plugins/parsers/prometheus/parser.go @@ -21,8 +21,9 @@ import ( ) type Parser struct { - DefaultTags map[string]string - Header 
http.Header + DefaultTags map[string]string + Header http.Header + IgnoreTimestamp bool } func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { @@ -65,14 +66,15 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { for _, m := range mf.Metric { // reading tags tags := common.MakeLabels(m, p.DefaultTags) + t := p.GetTimestamp(m, now) if mf.GetType() == dto.MetricType_SUMMARY { // summary metric - telegrafMetrics := makeQuantiles(m, tags, metricName, mf.GetType(), now) + telegrafMetrics := makeQuantiles(m, tags, metricName, mf.GetType(), t) metrics = append(metrics, telegrafMetrics...) } else if mf.GetType() == dto.MetricType_HISTOGRAM { // histogram metric - telegrafMetrics := makeBuckets(m, tags, metricName, mf.GetType(), now) + telegrafMetrics := makeBuckets(m, tags, metricName, mf.GetType(), t) metrics = append(metrics, telegrafMetrics...) } else { // standard metric @@ -80,7 +82,6 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { fields := getNameAndValue(m, metricName) // converting to telegraf metric if len(fields) > 0 { - t := getTimestamp(m, now) m := metric.New("prometheus", tags, fields, t, common.ValueType(mf.GetType())) metrics = append(metrics, m) } @@ -113,10 +114,9 @@ func (p *Parser) SetDefaultTags(tags map[string]string) { } // Get Quantiles for summary metric & Buckets for histogram -func makeQuantiles(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, now time.Time) []telegraf.Metric { +func makeQuantiles(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, t time.Time) []telegraf.Metric { var metrics []telegraf.Metric fields := make(map[string]interface{}) - t := getTimestamp(m, now) fields[metricName+"_count"] = float64(m.GetSummary().GetSampleCount()) fields[metricName+"_sum"] = float64(m.GetSummary().GetSampleSum()) @@ -137,10 +137,9 @@ func makeQuantiles(m *dto.Metric, tags map[string]string, metricName string, met } // Get Buckets from 
histogram metric -func makeBuckets(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, now time.Time) []telegraf.Metric { +func makeBuckets(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, t time.Time) []telegraf.Metric { var metrics []telegraf.Metric fields := make(map[string]interface{}) - t := getTimestamp(m, now) fields[metricName+"_count"] = float64(m.GetHistogram().GetSampleCount()) fields[metricName+"_sum"] = float64(m.GetHistogram().GetSampleSum()) @@ -179,9 +178,9 @@ func getNameAndValue(m *dto.Metric, metricName string) map[string]interface{} { return fields } -func getTimestamp(m *dto.Metric, now time.Time) time.Time { +func (p *Parser) GetTimestamp(m *dto.Metric, now time.Time) time.Time { var t time.Time - if m.TimestampMs != nil && *m.TimestampMs > 0 { + if !p.IgnoreTimestamp && m.TimestampMs != nil && *m.TimestampMs > 0 { t = time.Unix(0, m.GetTimestampMs()*1000000) } else { t = now diff --git a/plugins/parsers/prometheus/parser_test.go b/plugins/parsers/prometheus/parser_test.go index a403887e093b9..52ef2f5a3bed3 100644 --- a/plugins/parsers/prometheus/parser_test.go +++ b/plugins/parsers/prometheus/parser_test.go @@ -74,7 +74,7 @@ func TestParsingValidGauge(t *testing.T) { testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics()) } -func TestParsingValieCounter(t *testing.T) { +func TestParsingValidCounter(t *testing.T) { expected := []telegraf.Metric{ testutil.MustMetric( "prometheus", @@ -340,6 +340,32 @@ test_counter{label="test"} 1 %d testutil.RequireMetricsEqual(t, expected, metrics, testutil.SortMetrics()) } +func TestMetricsWithoutIgnoreTimestamp(t *testing.T) { + testTime := time.Date(2020, time.October, 4, 17, 0, 0, 0, time.UTC) + testTimeUnix := testTime.UnixNano() / int64(time.Millisecond) + metricsWithTimestamps := fmt.Sprintf(` +# TYPE test_counter counter +test_counter{label="test"} 1 %d +`, testTimeUnix) + expected := 
testutil.MustMetric( + "prometheus", + map[string]string{ + "label": "test", + }, + map[string]interface{}{ + "test_counter": float64(1.0), + }, + testTime, + telegraf.Counter, + ) + + parser := Parser{IgnoreTimestamp: true} + metric, _ := parser.ParseLine(metricsWithTimestamps) + + testutil.RequireMetricEqual(t, expected, metric, testutil.IgnoreTime(), testutil.SortMetrics()) + assert.WithinDuration(t, time.Now(), metric.Time(), 5*time.Second) +} + func parse(buf []byte) ([]telegraf.Metric, error) { parser := Parser{} return parser.Parse(buf) diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index f07c789a272f1..fcdfc473ae37a 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -156,6 +156,9 @@ type Config struct { // FormData configuration FormUrlencodedTagKeys []string `toml:"form_urlencoded_tag_keys"` + // Prometheus configuration + PrometheusIgnoreTimestamp bool `toml:"prometheus_ignore_timestamp"` + // Value configuration ValueFieldName string `toml:"value_field_name"` @@ -259,7 +262,10 @@ func NewParser(config *Config) (Parser, error) { config.FormUrlencodedTagKeys, ) case "prometheus": - parser, err = NewPrometheusParser(config.DefaultTags) + parser, err = NewPrometheusParser( + config.DefaultTags, + config.PrometheusIgnoreTimestamp, + ) case "prometheusremotewrite": parser, err = NewPrometheusRemoteWriteParser(config.DefaultTags) case "xml", "xpath_json", "xpath_msgpack", "xpath_protobuf": @@ -378,9 +384,10 @@ func NewFormUrlencodedParser( }, nil } -func NewPrometheusParser(defaultTags map[string]string) (Parser, error) { +func NewPrometheusParser(defaultTags map[string]string, ignoreTimestamp bool) (Parser, error) { return &prometheus.Parser{ - DefaultTags: defaultTags, + DefaultTags: defaultTags, + IgnoreTimestamp: ignoreTimestamp, }, nil } From e0c45e4a769f607e2b5028b6b65905924a58bf18 Mon Sep 17 00:00:00 2001 From: Minni Walia Date: Tue, 5 Oct 2021 21:51:45 +0000 Subject: [PATCH 51/81] docs: updated azure 
data explorer plugin documentation (#9816) --- plugins/outputs/azure_data_explorer/README.md | 112 +++++++++++------- 1 file changed, 66 insertions(+), 46 deletions(-) diff --git a/plugins/outputs/azure_data_explorer/README.md b/plugins/outputs/azure_data_explorer/README.md index bb6d0d039b0d2..4ae5bf7139924 100644 --- a/plugins/outputs/azure_data_explorer/README.md +++ b/plugins/outputs/azure_data_explorer/README.md @@ -1,10 +1,11 @@ # Azure Data Explorer output plugin -This plugin writes metrics collected by any of the input plugins of Telegraf to [Azure Data Explorer](https://azure.microsoft.com/en-au/services/data-explorer/). +This plugin writes data collected by any of the Telegraf input plugins to [Azure Data Explorer](https://azure.microsoft.com/en-au/services/data-explorer/). +Azure Data Explorer is a distributed, columnar store, purpose built for any type of logs, metrics and time series data. ## Pre-requisites: - [Create Azure Data Explorer cluster and database](https://docs.microsoft.com/en-us/azure/data-explorer/create-cluster-database-portal) -- VM/compute or container to host Telegraf - it could be hosted locally where an app/services to be monitored are deployed or remotely on a dedicated monitoring compute/container. +- VM/compute or container to host Telegraf - it could be hosted locally where an app/service to be monitored is deployed or remotely on a dedicated monitoring compute/container. ## Configuration: @@ -21,7 +22,7 @@ This plugin writes metrics collected by any of the input plugins of Telegraf to # database = "" ## Timeout for Azure Data Explorer operations - # timeout = "15s" + # timeout = "20s" ## Type of metrics grouping used when pushing to Azure Data Explorer. ## Default is "TablePerMetric" for one table per different metric. @@ -30,9 +31,6 @@ This plugin writes metrics collected by any of the input plugins of Telegraf to ## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable"). 
# table_name = "" - - # timeout = "20s" - ``` ## Metrics Grouping @@ -48,12 +46,12 @@ The table name will match the `name` property of the metric, this means that the ### SingleTable -The plugin will send all the metrics received to a single Azure Data Explorer table. The name of the table must be supplied via `table_name` the config file. If the table doesn't exist the plugin will create the table, if the table exists then the plugin will try to merge the Telegraf metric schema to the existing table. For more information about the merge process check the [`.create-merge` documentation](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/management/create-merge-table-command). +The plugin will send all the metrics received to a single Azure Data Explorer table. The name of the table must be supplied via `table_name` in the config file. If the table doesn't exist the plugin will create the table, if the table exists then the plugin will try to merge the Telegraf metric schema to the existing table. For more information about the merge process check the [`.create-merge` documentation](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/management/create-merge-table-command). ## Tables Schema -The schema of the Azure Data Explorer table will match the structure of the Telegraf `Metric` object. The corresponding Azure Data Explorer command would be like the following: +The schema of the Azure Data Explorer table will match the structure of the Telegraf `Metric` object. 
The corresponding Azure Data Explorer command generated by the plugin would be like the following: ``` .create-merge table ['table-name'] (['fields']:dynamic, ['name']:string, ['tags']:dynamic, ['timestamp']:datetime) ``` @@ -63,7 +61,7 @@ The corresponding table mapping would be like the following: .create-or-alter table ['table-name'] ingestion json mapping 'table-name_mapping' '[{"column":"fields", "Properties":{"Path":"$[\'fields\']"}},{"column":"name", "Properties":{"Path":"$[\'name\']"}},{"column":"tags", "Properties":{"Path":"$[\'tags\']"}},{"column":"timestamp", "Properties":{"Path":"$[\'timestamp\']"}}]' ``` -**Note**: This plugin will automatically create Azure Data Explorer tables and corresponding table mapping as per the above mentioned commands. Since the `Metric` object is a complex type, the only output format supported is JSON. +**Note**: This plugin will automatically create Azure Data Explorer tables and corresponding table mapping as per the above mentioned commands. ## Authentiation @@ -95,7 +93,7 @@ The plugin will authenticate using the first available of the following configurations, **it's important to understand that the assessment, and consequently choosing the authentication method, will happen in order as below**: 1. **Client Credentials**: Azure AD Application ID and Secret. - + Set the following environment variables: - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate. 
@@ -126,50 +124,72 @@ following configurations, **it's important to understand that the assessment, an [arm]: https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-overview -## Querying collected metrics data in Azure Data Explorer -With all above configurations, you will have data stored in following standard format for each metric type stored as an Azure Data Explorer table - -ColumnName | ColumnType ----------- | ---------- -fields | dynamic -name | string -tags | dynamic -timestamp | datetime - -As "fields" and "tags" are of dynamic data type so following multiple ways to query this data - -1. **Query JSON attributes directly**: This is one of the coolest feature of Azure Data Explorer so you can run query like this - - ``` - Tablename - | where fields.size_kb == 9120 - ``` -2. **Use [Update policy](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/management/updatepolicy)**: to transform data, in this case, to flatten dynamic data type columns. This is the recommended performant way for querying over large data volumes compared to querying directly over JSON attributes. +## Querying data collected in Azure Data Explorer +Examples of data transformations and queries that would be useful to gain insights - +1. 
**Data collected using SQL input plugin** + + Sample SQL metrics data - + + name | tags | timestamp | fields + -----|------|-----------|------- + sqlserver_database_io|{"database_name":"azure-sql-db2","file_type":"DATA","host":"adx-vm","logical_filename":"tempdev","measurement_db_type":"AzureSQLDB","physical_filename":"tempdb.mdf","replica_updateability":"READ_WRITE","sql_instance":"adx-sql-server"}|2021-09-09T13:51:20Z|{"current_size_mb":16,"database_id":2,"file_id":1,"read_bytes":2965504,"read_latency_ms":68,"reads":47,"rg_read_stall_ms":42,"rg_write_stall_ms":0,"space_used_mb":0,"write_bytes":1220608,"write_latency_ms":103,"writes":149} + sqlserver_waitstats|{"database_name":"azure-sql-db2","host":"adx-vm","measurement_db_type":"AzureSQLDB","replica_updateability":"READ_WRITE","sql_instance":"adx-sql-server","wait_category":"Worker Thread","wait_type":"THREADPOOL"}|2021-09-09T13:51:20Z|{"max_wait_time_ms":15,"resource_wait_ms":4469,"signal_wait_time_ms":0,"wait_time_ms":4469,"waiting_tasks_count":1464} + + + Since collected metrics object is of complex type so "fields" and "tags" are stored as dynamic data type, multiple ways to query this data- + + - **Query JSON attributes directly**: Azure Data Explorer provides an ability to query JSON data in raw format without parsing it, so JSON attributes can be queried directly in following way - + ``` + Tablename + | where name == "sqlserver_azure_db_resource_stats" and todouble(fields.avg_cpu_percent) > 7 + ``` + ``` + Tablename + | distinct tostring(tags.database_name) + ``` + **Note** - This approach could have performance impact in case of large volumes of data, use belwo mentioned approach for such cases. + + - **Use [Update policy](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/management/updatepolicy)**: Transform dynamic data type columns using update policy. This is the recommended performant way for querying over large volumes of data compared to querying directly over JSON attributes. 
+ ``` // Function to transform data .create-or-alter function Transform_TargetTableName() { - SourceTableName - | extend clerk_type = tags.clerk_type - | extend host = tags.host + SourceTableName + | mv-apply fields on (extend key = tostring(bag_keys(fields)[0])) + | project fieldname=key, value=todouble(fields[key]), name, tags, timestamp } - // Create the destination table (if it doesn't exist already) + // Create destination table with above query's results schema (if it doesn't exist already) .set-or-append TargetTableName <| Transform_TargetTableName() | limit 0 // Apply update policy on destination table .alter table TargetTableName policy update - @'[{"IsEnabled": true, "Source": "SourceTableName", "Query": "Transform_TargetTableName()", "IsTransactional": false, "PropagateIngestionProperties": false}]' - + @'[{"IsEnabled": true, "Source": "SourceTableName", "Query": "Transform_TargetTableName()", "IsTransactional": true, "PropagateIngestionProperties": false}]' ``` - There are two ways to flatten dynamic columns as explained below. You can use either of these ways in above mentioned update policy function - 'Transform_TargetTableName()' - - Use [bag_unpack plugin](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/bag-unpackplugin) to unpack the dynamic columns as shown below. This method will unpack all columns, it could lead to issues in case source schema changes. - ``` - Tablename - | evaluate bag_unpack(tags) - | evaluate bag_unpack(fields) - ``` - - - Use [extend](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/extendoperator) operator as shown below. This is the best way provided you know what columns are needed in the final destination table. Another benefit of this method is even if schema changes, it will not break your queries or dashboards. - ``` - Tablename - | extend clerk_type = tags.clerk_type - | extend host = tags.host - ``` + +2. 
**Data collected using syslog input plugin** + + Sample syslog data - + + name | tags | timestamp | fields + -----|------|-----------|------- + syslog|{"appname":"azsecmond","facility":"user","host":"adx-linux-vm","hostname":"adx-linux-vm","severity":"info"}|2021-09-20T14:36:44Z|{"facility_code":1,"message":" 2021/09/20 14:36:44.890110 Failed to connect to mdsd: dial unix /var/run/mdsd/default_djson.socket: connect: no such file or directory","procid":"2184","severity_code":6,"timestamp":"1632148604890477000","version":1} + syslog|{"appname":"CRON","facility":"authpriv","host":"adx-linux-vm","hostname":"adx-linux-vm","severity":"info"}|2021-09-20T14:37:01Z|{"facility_code":10,"message":" pam_unix(cron:session): session opened for user root by (uid=0)","procid":"26446","severity_code":6,"timestamp":"1632148621120781000","version":1} + + There are multiple ways to flatten dynamic columns using 'extend' or 'bag_unpack' operator. You can use either of these ways in above mentioned update policy function - 'Transform_TargetTableName()' + + - Use [extend](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/extendoperator) operator - This is the recommended approach compared to 'bag_unpack' as it is faster and robust. Even if schema changes, it will not break queries or dashboards. + ``` + Tablenmae + | extend facility_code=toint(fields.facility_code), message=tostring(fields.message), procid= tolong(fields.procid), severity_code=toint(fields.severity_code), + SysLogTimestamp=unixtime_nanoseconds_todatetime(tolong(fields.timestamp)), version= todouble(fields.version), + appname= tostring(tags.appname), facility= tostring(tags.facility),host= tostring(tags.host), hostname=tostring(tags.hostname), severity=tostring(tags.severity) + | project-away fields, tags + ``` + - Use [bag_unpack plugin](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/bag-unpackplugin) to unpack the dynamic type columns automatically. 
This method could lead to issues if the source schema changes, as it dynamically expands columns.
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.6.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/consul/sdk v0.8.0 h1:OJtKBtEjboEZvG6AOUdh4Z1Zbyu0WcxQ0qatRrZHTVU= From 80188e35696c57064cb6c0db98144d9ec2038aac Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 6 Oct 2021 08:11:58 -0600 Subject: [PATCH 53/81] chore: update containerd to v1.5.7 (#9864) --- go.mod | 20 +++++------ go.sum | 108 +++++++++++++++++++++++++++++++++++++++++++++++---------- 2 files changed, 100 insertions(+), 28 deletions(-) diff --git a/go.mod b/go.mod index 9680b8c71e258..55de18a59efa0 100644 --- a/go.mod +++ b/go.mod @@ -30,8 +30,8 @@ require ( github.com/Azure/go-autorest/tracing v0.6.0 // indirect github.com/BurntSushi/toml v0.3.1 github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee - github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3 // indirect - github.com/Microsoft/hcsshim v0.8.16 // indirect + github.com/Microsoft/go-winio v0.4.17 // indirect + github.com/Microsoft/hcsshim v0.8.21 // indirect github.com/Shopify/sarama v1.27.2 github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46 // indirect github.com/aerospike/aerospike-client-go v1.27.0 @@ -70,8 +70,8 @@ require ( github.com/cenkalti/backoff v2.2.1+incompatible // indirect github.com/cespare/xxhash/v2 v2.1.1 // indirect github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 - github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68 // indirect - github.com/containerd/containerd v1.5.0-beta.4 // indirect + github.com/containerd/cgroups v1.0.1 // indirect + github.com/containerd/containerd v1.5.7 // indirect github.com/coreos/go-semver v0.3.0 // indirect github.com/couchbase/go-couchbase v0.1.0 github.com/couchbase/gomemcached v0.1.3 // indirect @@ -203,7 +203,7 @@ require ( github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029 
github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.1 // indirect - github.com/opencontainers/runc v1.0.0-rc95 // indirect + github.com/opencontainers/runc v1.0.2 // indirect github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect github.com/opentracing/opentracing-go v1.2.0 github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5 @@ -238,7 +238,7 @@ require ( github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083 // indirect github.com/signalfx/golib/v3 v3.3.34 github.com/signalfx/sapm-proto v0.4.0 // indirect - github.com/sirupsen/logrus v1.7.0 + github.com/sirupsen/logrus v1.8.1 github.com/sleepinggenius2/gosmi v0.4.3 github.com/snowflakedb/gosnowflake v1.5.0 github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 @@ -253,8 +253,8 @@ require ( github.com/tklauser/go-sysconf v0.3.5 // indirect github.com/tklauser/numcpus v0.2.2 // indirect github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330 - github.com/vishvananda/netlink v1.1.0 // indirect - github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df // indirect + github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 // indirect + github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae // indirect github.com/vjeantet/grok v1.0.1 github.com/vmware/govmomi v0.26.0 github.com/wavefronthq/wavefront-sdk-go v0.9.7 @@ -314,9 +314,9 @@ require ( gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect gotest.tools v2.2.0+incompatible - k8s.io/api v0.20.4 + k8s.io/api v0.20.6 k8s.io/apimachinery v0.21.1 - k8s.io/client-go v0.20.4 + k8s.io/client-go v0.20.6 k8s.io/klog/v2 v2.8.0 // indirect k8s.io/utils v0.0.0-20201110183641-67b214c5f920 // indirect modernc.org/cc/v3 v3.33.5 // indirect diff --git a/go.sum b/go.sum index 01652479bf4b9..fccbfb9b9bdcb 100644 --- a/go.sum +++ b/go.sum @@ -158,10 +158,11 @@ github.com/Microsoft/go-winio 
v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3 h1:mw6pDQqv38/WGF1cO/jF5t/jyAJ2yi7CmtFLLO5tGFI= -github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3 h1:mw6pDQqv38/WGF1cO/jF5t/jyAJ2yi7CmtFLLO5tGFI= github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w= +github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= @@ -174,8 +175,9 @@ github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2 github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= -github.com/Microsoft/hcsshim v0.8.16 h1:8/auA4LFIZFTGrqfKhGBSXwM6/4X1fHa/xniyEHu8ac= github.com/Microsoft/hcsshim v0.8.16/go.mod 
h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= +github.com/Microsoft/hcsshim v0.8.21 h1:btRfUDThBE5IKcvI8O8jOiIkujUsAMBSRsYDYmEi6oM= +github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= @@ -244,6 +246,7 @@ github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3/go.mod h1:KASm github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 h1:FD4/ikKOFxwP8muWDypbmBWc634+YcAs3eBrYAmRdZY= github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.3.3 h1:a9F4rlj7EWWrbj7BYw8J8+x+ZZkJeqzNyRk8hdPF+ro= github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= @@ -302,6 +305,7 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2ioR0= github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/blang/semver v3.1.0+incompatible/go.mod 
h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/bmatcuk/doublestar/v3 v3.0.0 h1:TQtVPlDnAYwcrVNB2JiGuMc++H5qzWZd9PhkNo5WyHI= @@ -348,7 +352,8 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= -github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 h1:57RI0wFkG/smvVTcz7F43+R0k+Hvci3jAVQF9lyMoOo= @@ -366,17 +371,20 @@ github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod 
h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= -github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68 h1:hkGVFjz+plgr5UfxZUTPFbUFIF/Km6/s+RVRIRHLrrY= github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/cgroups v1.0.1 h1:iJnMvco9XGvKUvNQkv88bE4uJXxRQH18efbKo9w5vHQ= +github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= @@ -394,8 +402,11 @@ github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMX github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= github.com/containerd/containerd 
v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= -github.com/containerd/containerd v1.5.0-beta.4 h1:zjz4MOAOFgdBlwid2nNUlJ3YLpVi/97L36lfMYJex60= github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= +github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= +github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= +github.com/containerd/containerd v1.5.7 h1:rQyoYtj4KddB3bxG6SAqd4+08gePNyJjRqvOIfV3rkM= +github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= @@ -403,25 +414,31 @@ github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= -github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e h1:6JKvHHt396/qabvMhnhUZvWaHZzfVfldxE60TK8YLhg= github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= +github.com/containerd/continuity v0.1.0 h1:UFRRY5JemiAhPZrr/uE0n8fMTLcZsUvySPr1+D7pgr8= +github.com/containerd/continuity v0.1.0/go.mod 
h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= +github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= +github.com/containerd/imgcrypt v1.1.1/go.mod 
h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= @@ -430,15 +447,24 @@ github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8h github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= +github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= github.com/containernetworking/cni v0.7.1/go.mod 
h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= +github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= +github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= @@ -450,7 +476,7 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7 github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/go-systemd/v22 v22.3.1/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg 
v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/couchbase/go-couchbase v0.1.0 h1:g4bCvDwRL+ZL6HLhYeRlXxEYP31Wpy0VFxnFw6efEp8= @@ -486,6 +512,7 @@ github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11 github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/digitalocean/godo v1.42.1/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= @@ -741,6 +768,7 @@ github.com/golang/geo v0.0.0-20190916061304-5b978397cfec h1:lJwO/92dFXWeXOZdoGXg github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e 
h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= @@ -852,6 +880,7 @@ github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gosnmp/gosnmp v1.32.0 h1:gctewmZx5qFI0oHMzRnjETqIZ093d9NgZy9TQr3V0iA= @@ -861,8 +890,10 @@ github.com/grid-x/modbus v0.0.0-20210224155242-c4a3d042e99b h1:Y4xqzO0CDNoehCr3n github.com/grid-x/modbus v0.0.0-20210224155242-c4a3d042e99b/go.mod h1:YaK0rKJenZ74vZFcSSLlAQqtG74PMI68eDjpDCDDmTw= github.com/grid-x/serial v0.0.0-20191104121038-e24bc9bf6f08 h1:syBxnRYnSPUDdkdo5U4sy2roxBPQDjNiw4od7xlsABQ= github.com/grid-x/serial v0.0.0-20191104121038-e24bc9bf6f08/go.mod h1:kdOd86/VGFWRrtkNwf1MPk0u1gIjc4Y7R2j7nhwc7Rk= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= @@ -916,6 +947,7 @@ 
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= @@ -927,7 +959,6 @@ github.com/hashicorp/serf v0.9.3/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKEN github.com/hashicorp/serf v0.9.5 h1:EBWvyu9tcRszt3Bxp3KNssBMP1KuHWyO51lz9+786iM= github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/hetznercloud/hcloud-go v1.21.1/go.mod h1:xng8lbDUg+xM1dgc0yGHX5EeqbwIq7UYlMWMTx3SQVg= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -936,6 +967,7 @@ github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod 
h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/apcupsd v0.0.0-20210427145308-694d5caead0e h1:3J1OB4RDKwXs5l8uEV6BP/tucOJOPDQysiT7/9cuXzA= github.com/influxdata/apcupsd v0.0.0-20210427145308-694d5caead0e/go.mod h1:WYK/Z/aXq9cbMFIL5ihcA4sX/r/3/WCas/Qvs/2fXcA= @@ -1070,6 +1102,7 @@ github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.13.1 h1:wXr2uRxZTJXHLly6qhJabee5JqIhTRoLBhDOA74hDEQ= github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= @@ -1103,6 +1136,7 @@ github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -1180,6 +1214,7 @@ 
github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/moby/ipvs v1.0.1 h1:aoZ7fhLTXgDbzVrAnvV+XbKOU8kOET7B3+xULDF/1o0= github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= +github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM= github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM= @@ -1243,6 +1278,8 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLA github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso= github.com/nsqio/go-nsq v1.0.8 h1:3L2F8tNLlwXXlp2slDUrUWSBn2O3nMh8R1/KEDFTHPk= github.com/nsqio/go-nsq v1.0.8/go.mod h1:vKq36oyeVXgsS5Q8YEO7WghqidAVXQlcFxzQbQTuDEY= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= @@ -1256,15 +1293,17 @@ github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= github.com/onsi/ginkgo v1.11.0/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA= +github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029 h1:lXQqyLroROhwR2Yq/kXbLzVecgmVeZh2TFLg6OxCd+w= github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc= @@ -1286,8 +1325,8 @@ github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rm github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= -github.com/opencontainers/runc v1.0.0-rc95 h1:RMuWVfY3E1ILlVsC3RhIq38n4sJtlOFwU9gfFZSqrd0= -github.com/opencontainers/runc v1.0.0-rc95/go.mod h1:z+bZxa/+Tz/FmYVWkhUajJdzFeOqjc5vrqskhVyHGUM= +github.com/opencontainers/runc v1.0.2 
h1:opHZMaswlyxz1OuGpBE53Dwe4/xF7EZTY0A2L/FpCOg= +github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -1305,6 +1344,7 @@ github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqi github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 h1:lM6RxxfUMrYL/f8bWEUqdXrANWtrL7Nndbm9iFN0DlU= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= @@ -1328,8 +1368,10 @@ github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144T github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= github.com/pavius/impi v0.0.0-20180302134524-c1cbdcb8df2b/go.mod h1:x/hU0bfdWIhuOT1SKwiJg++yvkk6EuOtJk8WtDZqgr8= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= github.com/pelletier/go-toml v1.7.0/go.mod 
h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= @@ -1367,6 +1409,7 @@ github.com/prometheus/alertmanager v0.21.0/go.mod h1:h7tJ81NA0VLWvWEayi1QltevFkL github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= @@ -1384,7 +1427,9 @@ github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.0/go.mod 
h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= @@ -1396,6 +1441,7 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9 github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= @@ -1409,6 +1455,7 @@ github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3x github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/prometheus v1.8.2-0.20200911110723-e83ef207b6c2 h1:IB/5RJRcJiR/YzKs4Aou86s/RaMepZOZVCArYNHJHWc= github.com/prometheus/prometheus v1.8.2-0.20200911110723-e83ef207b6c2/go.mod h1:Td6hjwdXDmVt5CI9T03Sw+yBNxLBq/Yx3ZtmtP8zlCA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= github.com/rcrowley/go-metrics 
v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -1480,8 +1527,9 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sleepinggenius2/gosmi v0.4.3 h1:99Zwzy1Cvgsh396sw07oR2G4ab88ILGZFMxSlGWnR6o= github.com/sleepinggenius2/gosmi v0.4.3/go.mod h1:l8OniPmd3bJzw0MXP2/qh7AhP/e+bTY2CNivIhsnDT0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= @@ -1495,10 +1543,13 @@ github.com/snowflakedb/gosnowflake v1.5.0/go.mod h1:1kyg2XEduwti88V11PKRHImhXLK5 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.0.0/go.mod 
h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -1506,6 +1557,7 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 h1:WhxRHzgeVGETMlmVfqhRn8RIeeNoPr2Czh33I4Zdccw= @@ -1554,6 +1606,7 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1 github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod 
h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= @@ -1563,11 +1616,13 @@ github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330/go.mod h1:7+aWBs github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec/go.mod h1:owBmyHYMLkxyrugmfwE/DLJyW8Ro9mkphwuVErQ0iUw= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= -github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 h1:cPXZWzzG0NllBLdjWoD1nDfaqu98YMv+OneaKc8sPOA= +github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= -github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7ZovXvuNyL3XQl8UFofeikI1NW1Gypu7k= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae h1:4hwBBUfQCFe3Cym0ZtKyq7L16eZUtYKs+BaHDN6mAns= +github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vjeantet/grok v1.0.1 h1:2rhIR7J4gThTgcZ1m2JY4TrJZNgjn985U28kT2wQrJ4= github.com/vjeantet/grok v1.0.1/go.mod h1:ax1aAchzC6/QMXMcyzHQGZWaW1l195+uMYIkCWPCNIo= github.com/vmware/govmomi v0.26.0 h1:JMZR5c7MHH3nCEAVYS3WyRIA35W3+b3tLwAqxVzq1Rw= @@ -1599,6 +1654,7 @@ github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint 
v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a h1:fZHgsYlfvtyqToslyjUt3VOPF4J7aK/3MPcK7xp3PDk= github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a/go.mod h1:ul22v+Nro/R083muKhosV54bj5niojjWZvU8xrevuH4= @@ -1613,6 +1669,7 @@ github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= @@ -1807,6 +1864,7 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -1890,6 +1948,7 @@ golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1918,6 +1977,7 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1943,6 +2003,7 @@ 
golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2343,21 +2404,30 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.18.8/go.mod h1:d/CXqwWv+Z2XEG1LgceeDmHQwpUJhROPx16SlxJgERY= k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= -k8s.io/api v0.20.4 h1:xZjKidCirayzX6tHONRQyTNDVIR55TYVqgATqo6ZULY= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= +k8s.io/api v0.20.6 h1:bgdZrW++LqgrLikWYNruIKAtltXbSCX2l5mJu11hrVE= +k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= k8s.io/apimachinery v0.18.8/go.mod h1:6sQd+iHEqmOtALqOFjSWp2KZ9F0wlU/nWm0ZgsYWMig= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= k8s.io/apimachinery v0.21.1 h1:Q6XuHGlj2xc+hlMCvqyYfbv3H7SRGn2c8NycxJquDVs= k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= k8s.io/apiserver v0.20.1/go.mod 
h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= +k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= +k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= k8s.io/client-go v0.18.8/go.mod h1:HqFqMllQ5NnQJNwjro9k5zMyfhZlOwpuTLVrxjkYSxU= k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= -k8s.io/client-go v0.20.4 h1:85crgh1IotNkLpKYKZHVNI1JT86nr/iDCvq2iWKsql4= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= +k8s.io/client-go v0.20.6 h1:nJZOfolnsVtDtbGJNCxzOtKUAu7zvXjB8+pMo9UNxZo= +k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= +k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= +k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= @@ -2410,9 +2480,11 @@ rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod 
h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8= sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= From 83dae504ade945e982cae37b95ea983048b3a951 Mon Sep 17 00:00:00 2001 From: "telegraf-tiger[bot]" <76974415+telegraf-tiger[bot]@users.noreply.github.com> Date: Wed, 6 Oct 2021 11:48:58 -0500 Subject: [PATCH 54/81] fix: update etc/telegraf.conf and etc/telegraf_windows.conf (#9814) Co-authored-by: Tiger Bot <> --- etc/telegraf.conf | 284 +- etc/telegraf_windows.conf | 8843 ++++++++++++++++++++++++++++++++++++- 2 files changed, 8723 insertions(+), 404 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 12672da45cf7d..5564bc23ac8aa 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -765,6 +765,9 @@ # ## Endpoints for your graylog instances. # servers = ["udp://127.0.0.1:12201"] # +# ## Connection timeout. +# # timeout = "5s" +# # ## The field to use as the GELF short_message, if unset the static string # ## "telegraf" will be used. # ## example: short_message_field = "message" @@ -1254,6 +1257,12 @@ # ## actually reads it # # retain = false # +# ## Defines the maximum length of time that the broker and client may not communicate. +# ## Defaults to 0 which turns the feature off. 
For version v2.0.12 of eclipse/mosquitto there is a +# ## [bug](https://github.com/eclipse/mosquitto/issues/2117) which requires keep_alive to be set. +# ## As a reference eclipse/paho.mqtt.golang v1.3.0 defaults to 30. +# # keep_alive = 0 +# # ## Data format to output. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -3415,6 +3424,14 @@ # # ## Filter bucket fields to include only here. # # bucket_stats_included = ["quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification (defaults to false) +# ## If set to false, tls_cert and tls_key are required +# # insecure_skip_verify = false # # Read CouchDB Stats from one or more servers @@ -3769,6 +3786,13 @@ # ## The date/time field in the Elasticsearch index (mandatory). # date_field = "@timestamp" # +# ## If the field used for the date/time field in Elasticsearch is also using +# ## a custom date/time format it may be required to provide the format to +# ## correctly parse the field. +# ## +# ## If using one of the built in elasticsearch formats this is not required. +# # date_field_custom_format = "" +# # ## Time window to query (eg. "1m" to query documents from last minute). # ## Normally should be set to same as collection interval # query_period = "1m" @@ -4777,6 +4801,12 @@ # # ] +# # Read metrics about LVM physical volumes, volume groups, logical volumes. +# [[inputs.lvm]] +# ## Use sudo to run LVM commands +# use_sudo = false + + # # Gathers metrics from the /3.0/reports MailChimp API # [[inputs.mailchimp]] # ## MailChimp API key @@ -5471,6 +5501,12 @@ # ## Password. Required for auth_method = "UserName" # # password = "" # # +# ## Option to select the metric timestamp to use. 
Valid options are: +# ## "gather" -- uses the time of receiving the data in telegraf +# ## "server" -- uses the timestamp provided by the server +# ## "source" -- uses the timestamp provided by the source +# # timestamp = "gather" +# # # ## Node ID configuration # ## name - field name to use in the output # ## namespace - OPC UA namespace of the node (integer value 0 thru 3) @@ -5546,7 +5582,7 @@ # # timeout = "5ms" -# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver +# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver # [[inputs.opensmtpd]] # ## If running as a restricted user you can prepend sudo for additional access: # #use_sudo = false @@ -6486,219 +6522,6 @@ # ## General connection timeout # # timeout = "5s" -# # Input plugin to collect Windows Event Log messages -# [[inputs.win_eventlog]] -# ## Telegraf should have Administrator permissions to subscribe for some Windows Events channels -# ## (System log, for example) -# -# ## LCID (Locale ID) for event rendering -# ## 1033 to force English language -# ## 0 to use default Windows locale -# # locale = 0 -# -# ## Name of eventlog, used only if xpath_query is empty -# ## Example: "Application" -# # eventlog_name = "" -# -# ## xpath_query can be in defined short form like "Event/System[EventID=999]" -# ## or you can form a XML Query. 
Refer to the Consuming Events article: -# ## https://docs.microsoft.com/en-us/windows/win32/wes/consuming-events -# ## XML query is the recommended form, because it is most flexible -# ## You can create or debug XML Query by creating Custom View in Windows Event Viewer -# ## and then copying resulting XML here -# xpath_query = ''' -# -# -# -# *[System[( (EventID >= 5152 and EventID <= 5158) or EventID=5379 or EventID=4672)]] -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# ''' -# -# ## System field names: -# ## "Source", "EventID", "Version", "Level", "Task", "Opcode", "Keywords", "TimeCreated", -# ## "EventRecordID", "ActivityID", "RelatedActivityID", "ProcessID", "ThreadID", "ProcessName", -# ## "Channel", "Computer", "UserID", "UserName", "Message", "LevelText", "TaskText", "OpcodeText" -# -# ## In addition to System, Data fields can be unrolled from additional XML nodes in event. -# ## Human-readable representation of those nodes is formatted into event Message field, -# ## but XML is more machine-parsable -# -# # Process UserData XML to fields, if this node exists in Event XML -# process_userdata = true -# -# # Process EventData XML to fields, if this node exists in Event XML -# process_eventdata = true -# -# ## Separator character to use for unrolled XML Data field names -# separator = "_" -# -# ## Get only first line of Message field. For most events first line is usually more than enough -# only_first_line_of_message = true -# -# ## Parse timestamp from TimeCreated.SystemTime event field. -# ## Will default to current time of telegraf processing on parsing error or if set to false -# timestamp_from_event = true -# -# ## Fields to include as tags. Globbing supported ("Level*" for both "Level" and "LevelText") -# event_tags = ["Source", "EventID", "Level", "LevelText", "Task", "TaskText", "Opcode", "OpcodeText", "Keywords", "Channel", "Computer"] -# -# ## Default list of fields to send. All fields are sent by default. 
Globbing supported -# event_fields = ["*"] -# -# ## Fields to exclude. Also applied to data fields. Globbing supported -# exclude_fields = ["TimeCreated", "Binary", "Data_Address*"] -# -# ## Skip those tags or fields if their value is empty or equals to zero. Globbing supported -# exclude_empty = ["*ActivityID", "UserID"] - - -# # Input plugin to counterPath Performance Counters on Windows operating systems -# [[inputs.win_perf_counters]] -# ## By default this plugin returns basic CPU and Disk statistics. -# ## See the README file for more examples. -# ## Uncomment examples below or write your own as you see fit. If the system -# ## being polled for data does not have the Object at startup of the Telegraf -# ## agent, it will not be gathered. -# ## Settings: -# # PrintValid = false # Print All matching performance counters -# # Whether request a timestamp along with the PerfCounter data or just use current time -# # UsePerfCounterTime=true -# # If UseWildcardsExpansion params is set to true, wildcards (partial wildcards in instance names and wildcards in counters names) in configured counter paths will be expanded -# # and in case of localized Windows, counter paths will be also localized. It also returns instance indexes in instance names. -# # If false, wildcards (not partial) in instance names will still be expanded, but instance indexes will not be returned in instance names. -# #UseWildcardsExpansion = false -# # Period after which counters will be reread from configuration and wildcards in counter paths expanded -# CountersRefreshInterval="1m" -# -# [[inputs.win_perf_counters.object]] -# # Processor usage, alternative to native, reports on a per core. -# ObjectName = "Processor" -# Instances = ["*"] -# Counters = [ -# "% Idle Time", -# "% Interrupt Time", -# "% Privileged Time", -# "% User Time", -# "% Processor Time", -# "% DPC Time", -# ] -# Measurement = "win_cpu" -# # Set to true to include _Total instance when querying for all (*). 
-# # IncludeTotal=false -# # Print out when the performance counter is missing from object, counter or instance. -# # WarnOnMissing = false -# -# [[inputs.win_perf_counters.object]] -# # Disk times and queues -# ObjectName = "LogicalDisk" -# Instances = ["*"] -# Counters = [ -# "% Idle Time", -# "% Disk Time", -# "% Disk Read Time", -# "% Disk Write Time", -# "% User Time", -# "% Free Space", -# "Current Disk Queue Length", -# "Free Megabytes", -# ] -# Measurement = "win_disk" -# -# [[inputs.win_perf_counters.object]] -# ObjectName = "PhysicalDisk" -# Instances = ["*"] -# Counters = [ -# "Disk Read Bytes/sec", -# "Disk Write Bytes/sec", -# "Current Disk Queue Length", -# "Disk Reads/sec", -# "Disk Writes/sec", -# "% Disk Time", -# "% Disk Read Time", -# "% Disk Write Time", -# ] -# Measurement = "win_diskio" -# -# [[inputs.win_perf_counters.object]] -# ObjectName = "Network Interface" -# Instances = ["*"] -# Counters = [ -# "Bytes Received/sec", -# "Bytes Sent/sec", -# "Packets Received/sec", -# "Packets Sent/sec", -# "Packets Received Discarded", -# "Packets Outbound Discarded", -# "Packets Received Errors", -# "Packets Outbound Errors", -# ] -# Measurement = "win_net" -# -# -# [[inputs.win_perf_counters.object]] -# ObjectName = "System" -# Counters = [ -# "Context Switches/sec", -# "System Calls/sec", -# "Processor Queue Length", -# "System Up Time", -# ] -# Instances = ["------"] -# Measurement = "win_system" -# -# [[inputs.win_perf_counters.object]] -# # Example counterPath where the Instance portion must be removed to get data back, -# # such as from the Memory object. 
-# ObjectName = "Memory" -# Counters = [ -# "Available Bytes", -# "Cache Faults/sec", -# "Demand Zero Faults/sec", -# "Page Faults/sec", -# "Pages/sec", -# "Transition Faults/sec", -# "Pool Nonpaged Bytes", -# "Pool Paged Bytes", -# "Standby Cache Reserve Bytes", -# "Standby Cache Normal Priority Bytes", -# "Standby Cache Core Bytes", -# ] -# Instances = ["------"] # Use 6 x - to remove the Instance bit from the counterPath. -# Measurement = "win_mem" -# -# [[inputs.win_perf_counters.object]] -# # Example query where the Instance portion must be removed to get data back, -# # such as from the Paging File object. -# ObjectName = "Paging File" -# Counters = [ -# "% Usage", -# ] -# Instances = ["_Total"] -# Measurement = "win_swap" - - -# # Input plugin to report Windows services info. -# [[inputs.win_services]] -# ## Names of the services to monitor. Leave empty to monitor all the available services on the host. Globs accepted. -# service_names = [ -# "LanmanServer", -# "TermService", -# "Win*", -# ] - # # A plugin to collect stats from Varnish HTTP Cache # [[inputs.varnish]] @@ -6802,7 +6625,7 @@ # # Listener capable of handling KNX bus messages provided through a KNX-IP Interface. -# [[inputs.knx_listener]] +# [[inputs.KNXListener]] # ## Type of KNX-IP interface. # ## Can be either "tunnel" or "router". # # service_type = "tunnel" @@ -7667,7 +7490,7 @@ # ## This value is propagated to pqos tool. Interval format is defined by pqos itself. # ## If not provided or provided 0, will be set to 10 = 10x100ms = 1s. # # sampling_interval = "10" -# +# # ## Optionally specify the path to pqos executable. # ## If not provided, auto discovery will be performed. # # pqos_path = "/usr/local/bin/pqos" @@ -7675,12 +7498,12 @@ # ## Optionally specify if IPC and LLC_Misses metrics shouldn't be propagated. # ## If not provided, default value is false. # # shortened_metrics = false -# +# # ## Specify the list of groups of CPU core(s) to be provided as pqos input. 
# ## Mandatory if processes aren't set and forbidden if processes are specified. # ## e.g. ["0-3", "4,5,6"] or ["1-3,4"] # # cores = ["0-3"] -# +# # ## Specify the list of processes for which Metrics will be collected. # ## Mandatory if cores aren't set and forbidden if cores are specified. # ## e.g. ["qemu", "pmd"] @@ -7924,6 +7747,30 @@ # table_name = "default" +# # Listener capable of handling KNX bus messages provided through a KNX-IP Interface. +# [[inputs.knx_listener]] +# ## Type of KNX-IP interface. +# ## Can be either "tunnel" or "router". +# # service_type = "tunnel" +# +# ## Address of the KNX-IP interface. +# service_address = "localhost:3671" +# +# ## Measurement definition(s) +# # [[inputs.knx_listener.measurement]] +# # ## Name of the measurement +# # name = "temperature" +# # ## Datapoint-Type (DPT) of the KNX messages +# # dpt = "9.001" +# # ## List of Group-Addresses (GAs) assigned to the measurement +# # addresses = ["5/5/1"] +# +# # [[inputs.knx_listener.measurement]] +# # name = "illumination" +# # dpt = "9.004" +# # addresses = ["5/5/3"] + + # # Read metrics off Arista LANZ, via socket # [[inputs.lanz]] # ## URL to Arista LANZ endpoint @@ -9103,3 +8950,4 @@ # [[inputs.zipkin]] # # path = "/api/v1/spans" # URL path for span data # # port = 9411 # Port on which Telegraf listens + diff --git a/etc/telegraf_windows.conf b/etc/telegraf_windows.conf index 5b7ca95057444..ee67219c3c3f5 100644 --- a/etc/telegraf_windows.conf +++ b/etc/telegraf_windows.conf @@ -90,8 +90,8 @@ ## If set to -1, no archives are removed. # logfile_rotation_max_archives = 5 - ## Pick a timezone to use when logging or type 'local' for local time. Example: 'America/Chicago'. - ## See https://socketloop.com/tutorials/golang-display-list-of-timezones-with-gmt for timezone formatting options. + ## Pick a timezone to use when logging or type 'local' for local time. 
+ ## Example: America/Chicago # log_with_timezone = "" ## Override default hostname, if empty use os.Hostname() @@ -99,7 +99,6 @@ ## If set to true, do no set the "host" tag in the telegraf agent. omit_hostname = false - ############################################################################### # OUTPUT PLUGINS # ############################################################################### @@ -175,7 +174,7 @@ ## HTTP Content-Encoding for write request body, can be set to "gzip" to ## compress body or "identity" to apply no encoding. - # content_encoding = "identity" + # content_encoding = "gzip" ## When true, Telegraf will output unsigned integers as unsigned values, ## i.e.: "42u". You will need a version of InfluxDB supporting unsigned @@ -183,6 +182,696 @@ ## existing data has been written. # influx_uint_support = false + +# # Configuration for Amon Server to send metrics to. +# [[outputs.amon]] +# ## Amon Server Key +# server_key = "my-server-key" # required. +# +# ## Amon Instance URL +# amon_instance = "https://youramoninstance" # required +# +# ## Connection timeout. +# # timeout = "5s" + + +# # Publishes metrics to an AMQP broker +# [[outputs.amqp]] +# ## Broker to publish to. +# ## deprecated in 1.7; use the brokers option +# # url = "amqp://localhost:5672/influxdb" +# +# ## Brokers to publish to. If multiple brokers are specified a random broker +# ## will be selected anytime a connection is established. This can be +# ## helpful for load balancing when not using a dedicated load balancer. +# brokers = ["amqp://localhost:5672/influxdb"] +# +# ## Maximum messages to send over a connection. Once this is reached, the +# ## connection is closed and a new connection is made. This can be helpful for +# ## load balancing when not using a dedicated load balancer. +# # max_messages = 0 +# +# ## Exchange to declare and publish to. +# exchange = "telegraf" +# +# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash". 
+# # exchange_type = "topic" +# +# ## If true, exchange will be passively declared. +# # exchange_passive = false +# +# ## Exchange durability can be either "transient" or "durable". +# # exchange_durability = "durable" +# +# ## Additional exchange arguments. +# # exchange_arguments = { } +# # exchange_arguments = {"hash_property" = "timestamp"} +# +# ## Authentication credentials for the PLAIN auth_method. +# # username = "" +# # password = "" +# +# ## Auth method. PLAIN and EXTERNAL are supported +# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as +# ## described here: https://www.rabbitmq.com/plugins.html +# # auth_method = "PLAIN" +# +# ## Metric tag to use as a routing key. +# ## ie, if this tag exists, its value will be used as the routing key +# # routing_tag = "host" +# +# ## Static routing key. Used when no routing_tag is set or as a fallback +# ## when the tag specified in routing tag is not found. +# # routing_key = "" +# # routing_key = "telegraf" +# +# ## Delivery Mode controls if a published message is persistent. +# ## One of "transient" or "persistent". +# # delivery_mode = "transient" +# +# ## InfluxDB database added as a message header. +# ## deprecated in 1.7; use the headers option +# # database = "telegraf" +# +# ## InfluxDB retention policy added as a message header +# ## deprecated in 1.7; use the headers option +# # retention_policy = "default" +# +# ## Static headers added to each published message. +# # headers = { } +# # headers = {"database" = "telegraf", "retention_policy" = "default"} +# +# ## Connection timeout. If not provided, will default to 5s. 0s means no +# ## timeout (not recommended). 
+# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## If true use batch serialization format instead of line based delimiting. +# ## Only applies to data formats which are not line based such as JSON. +# ## Recommended to set to true. +# # use_batch_format = false +# +# ## Content encoding for message payloads, can be set to "gzip" to or +# ## "identity" to apply no encoding. +# ## +# ## Please note that when use_batch_format = false each amqp message contains only +# ## a single metric, it is recommended to use compression with batch format +# ## for best results. +# # content_encoding = "identity" +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" + + +# # Send metrics to Azure Application Insights +# [[outputs.application_insights]] +# ## Instrumentation key of the Application Insights resource. +# instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx" +# +# ## Regions that require endpoint modification https://docs.microsoft.com/en-us/azure/azure-monitor/app/custom-endpoints +# # endpoint_url = "https://dc.services.visualstudio.com/v2/track" +# +# ## Timeout for closing (default: 5s). +# # timeout = "5s" +# +# ## Enable additional diagnostic logging. +# # enable_diagnostic_logging = false +# +# ## Context Tag Sources add Application Insights context tags to a tag value. 
+# ## +# ## For list of allowed context tag keys see: +# ## https://github.com/microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go +# # [outputs.application_insights.context_tag_sources] +# # "ai.cloud.role" = "kubernetes_container_name" +# # "ai.cloud.roleInstance" = "kubernetes_pod_name" + + +# # Sends metrics to Azure Data Explorer +# [[outputs.azure_data_explorer]] +# ## Azure Data Exlorer cluster endpoint +# ## ex: endpoint_url = "https://clustername.australiasoutheast.kusto.windows.net" +# endpoint_url = "" +# +# ## The Azure Data Explorer database that the metrics will be ingested into. +# ## The plugin will NOT generate this database automatically, it's expected that this database already exists before ingestion. +# ## ex: "exampledatabase" +# database = "" +# +# ## Timeout for Azure Data Explorer operations +# # timeout = "20s" +# +# ## Type of metrics grouping used when pushing to Azure Data Explorer. +# ## Default is "TablePerMetric" for one table per different metric. +# ## For more information, please check the plugin README. +# # metrics_grouping_type = "TablePerMetric" +# +# ## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable"). +# # table_name = "" +# + + +# # Send aggregate metrics to Azure Monitor +# [[outputs.azure_monitor]] +# ## Timeout for HTTP writes. +# # timeout = "20s" +# +# ## Set the namespace prefix, defaults to "Telegraf/". +# # namespace_prefix = "Telegraf/" +# +# ## Azure Monitor doesn't have a string value type, so convert string +# ## fields to dimensions (a.k.a. tags) if enabled. Azure Monitor allows +# ## a maximum of 10 dimensions so Telegraf will only send the first 10 +# ## alphanumeric dimensions. +# # strings_as_dimensions = false +# +# ## Both region and resource_id must be set or be available via the +# ## Instance Metadata service on Azure Virtual Machines. +# # +# ## Azure Region to publish metrics against. 
+# ## ex: region = "southcentralus" +# # region = "" +# # +# ## The Azure Resource ID against which metric will be logged, e.g. +# ## ex: resource_id = "/subscriptions//resourceGroups//providers/Microsoft.Compute/virtualMachines/" +# # resource_id = "" +# +# ## Optionally, if in Azure US Government, China or other sovereign +# ## cloud environment, set appropriate REST endpoint for receiving +# ## metrics. (Note: region may be unused in this context) +# # endpoint_url = "https://monitoring.core.usgovcloudapi.net" + + +# # Publish Telegraf metrics to a Google Cloud PubSub topic +# [[outputs.cloud_pubsub]] +# ## Required. Name of Google Cloud Platform (GCP) Project that owns +# ## the given PubSub topic. +# project = "my-project" +# +# ## Required. Name of PubSub topic to publish metrics to. +# topic = "my-topic" +# +# ## Required. Data format to consume. +# ## Each data format has its own unique set of configuration options. +# ## Read more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Optional. Filepath for GCP credentials JSON file to authorize calls to +# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use +# ## Application Default Credentials, which is preferred. +# # credentials_file = "path/to/my/creds.json" +# +# ## Optional. If true, will send all metrics per write in one PubSub message. +# # send_batched = true +# +# ## The following publish_* parameters specifically configures batching +# ## requests made to the GCP Cloud PubSub API via the PubSub Golang library. Read +# ## more here: https://godoc.org/cloud.google.com/go/pubsub#PublishSettings +# +# ## Optional. Send a request to PubSub (i.e. actually publish a batch) +# ## when it has this many PubSub messages. If send_batched is true, +# ## this is ignored and treated as if it were 1. +# # publish_count_threshold = 1000 +# +# ## Optional. Send a request to PubSub (i.e. 
actually publish a batch) +# ## when it has this many PubSub messages. If send_batched is true, +# ## this is ignored and treated as if it were 1 +# # publish_byte_threshold = 1000000 +# +# ## Optional. Specifically configures requests made to the PubSub API. +# # publish_num_go_routines = 2 +# +# ## Optional. Specifies a timeout for requests to the PubSub API. +# # publish_timeout = "30s" +# +# ## Optional. If true, published PubSub message data will be base64-encoded. +# # base64_data = false +# +# ## Optional. PubSub attributes to add to metrics. +# # [outputs.cloud_pubsub.attributes] +# # my_attr = "tag_value" + + +# # Configuration for AWS CloudWatch output. +# [[outputs.cloudwatch]] +# ## Amazon REGION +# region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Namespace for the CloudWatch MetricDatums +# namespace = "InfluxData/Telegraf" +# +# ## If you have a large amount of metrics, you should consider to send statistic +# ## values instead of raw metrics which could not only improve performance but +# ## also save AWS API cost. 
If enable this flag, this plugin would parse the required +# ## CloudWatch statistic fields (count, min, max, and sum) and send them to CloudWatch. +# ## You could use basicstats aggregator to calculate those fields. If not all statistic +# ## fields are available, all fields would still be sent as raw metrics. +# # write_statistics = false +# +# ## Enable high resolution metrics of 1 second (if not enabled, standard resolution are of 60 seconds precision) +# # high_resolution_metrics = false + + +# # Configuration for AWS CloudWatchLogs output. +# [[outputs.cloudwatch_logs]] +# ## The region is the Amazon region that you wish to connect to. +# ## Examples include but are not limited to: +# ## - us-west-1 +# ## - us-west-2 +# ## - us-east-1 +# ## - ap-southeast-1 +# ## - ap-southeast-2 +# ## ... +# region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Cloud watch log group. Must be created in AWS cloudwatch logs upfront! 
+# ## For example, you can specify the name of the k8s cluster here to group logs from all cluster in oine place +# log_group = "my-group-name" +# +# ## Log stream in log group +# ## Either log group name or reference to metric attribute, from which it can be parsed: +# ## tag: or field:. If log stream is not exist, it will be created. +# ## Since AWS is not automatically delete logs streams with expired logs entries (i.e. empty log stream) +# ## you need to put in place appropriate house-keeping (https://forums.aws.amazon.com/thread.jspa?threadID=178855) +# log_stream = "tag:location" +# +# ## Source of log data - metric name +# ## specify the name of the metric, from which the log data should be retrieved. +# ## I.e., if you are using docker_log plugin to stream logs from container, then +# ## specify log_data_metric_name = "docker_log" +# log_data_metric_name = "docker_log" +# +# ## Specify from which metric attribute the log data should be retrieved: +# ## tag: or field:. +# ## I.e., if you are using docker_log plugin to stream logs from container, then +# ## specify log_data_source = "field:message" +# log_data_source = "field:message" + + +# # Configuration for CrateDB to send metrics to. +# [[outputs.cratedb]] +# # A github.com/jackc/pgx/v4 connection string. +# # See https://pkg.go.dev/github.com/jackc/pgx/v4#ParseConfig +# url = "postgres://user:password@localhost/schema?sslmode=disable" +# # Timeout for all CrateDB queries. +# timeout = "5s" +# # Name of the table to store metrics in. +# table = "metrics" +# # If true, and the metrics table does not exist, create it automatically. +# table_create = true +# # The character(s) to replace any '.' in an object key with +# key_separator = "_" + + +# # Configuration for DataDog API to send metrics to. +# [[outputs.datadog]] +# ## Datadog API key +# apikey = "my-secret-key" +# +# ## Connection timeout. +# # timeout = "5s" +# +# ## Write URL override; useful for debugging. 
+# # url = "https://app.datadoghq.com/api/v1/series" +# +# ## Set http_proxy (telegraf uses the system wide proxy settings if it isn't set) +# # http_proxy_url = "http://localhost:8888" + + +# # Send metrics to nowhere at all +# [[outputs.discard]] +# # no configuration + + +# # Send telegraf metrics to a Dynatrace environment +# [[outputs.dynatrace]] +# ## For usage with the Dynatrace OneAgent you can omit any configuration, +# ## the only requirement is that the OneAgent is running on the same host. +# ## Only setup environment url and token if you want to monitor a Host without the OneAgent present. +# ## +# ## Your Dynatrace environment URL. +# ## For Dynatrace OneAgent you can leave this empty or set it to "http://127.0.0.1:14499/metrics/ingest" (default) +# ## For Dynatrace SaaS environments the URL scheme is "https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest" +# ## For Dynatrace Managed environments the URL scheme is "https://{your-domain}/e/{your-environment-id}/api/v2/metrics/ingest" +# url = "" +# +# ## Your Dynatrace API token. +# ## Create an API token within your Dynatrace environment, by navigating to Settings > Integration > Dynatrace API +# ## The API token needs data ingest scope permission. When using OneAgent, no API token is required. +# api_token = "" +# +# ## Optional prefix for metric names (e.g.: "telegraf") +# prefix = "telegraf" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Optional flag for ignoring tls certificate check +# # insecure_skip_verify = false +# +# +# ## Connection timeout, defaults to "5s" if not set. 
+# timeout = "5s" +# +# ## If you want metrics to be treated and reported as delta counters, add the metric names here +# additional_counters = [ ] +# +# ## Optional dimensions to be added to every metric +# # [outputs.dynatrace.default_dimensions] +# # default_key = "default value" + + +# # Configuration for Elasticsearch to send metrics to. +# [[outputs.elasticsearch]] +# ## The full HTTP endpoint URL for your Elasticsearch instance +# ## Multiple urls can be specified as part of the same cluster, +# ## this means that only ONE of the urls will be written to each interval. +# urls = [ "http://node1.es.example.com:9200" ] # required. +# ## Elasticsearch client timeout, defaults to "5s" if not set. +# timeout = "5s" +# ## Set to true to ask Elasticsearch a list of all cluster nodes, +# ## thus it is not necessary to list all nodes in the urls config option. +# enable_sniffer = false +# ## Set to true to enable gzip compression +# enable_gzip = false +# ## Set the interval to check if the Elasticsearch nodes are available +# ## Setting to "0s" will disable the health check (not recommended in production) +# health_check_interval = "10s" +# ## HTTP basic authentication details +# # username = "telegraf" +# # password = "mypassword" +# +# ## Index Config +# ## The target index for metrics (Elasticsearch will create if it not exists). +# ## You can use the date specifiers below to create indexes per time frame. +# ## The metric timestamp will be used to decide the destination index name +# # %Y - year (2016) +# # %y - last two digits of year (00..99) +# # %m - month (01..12) +# # %d - day of month (e.g., 01) +# # %H - hour (00..23) +# # %V - week of the year (ISO week) (01..53) +# ## Additionally, you can specify a tag name using the notation {{tag_name}} +# ## which will be used as part of the index name. If the tag does not exist, +# ## the default tag value will be used. 
+# # index_name = "telegraf-{{host}}-%Y.%m.%d" +# # default_tag_value = "none" +# index_name = "telegraf-%Y.%m.%d" # required. +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Template Config +# ## Set to true if you want telegraf to manage its index template. +# ## If enabled it will create a recommended index template for telegraf indexes +# manage_template = true +# ## The template name used for telegraf indexes +# template_name = "telegraf" +# ## Set to true if you want telegraf to overwrite an existing template +# overwrite_template = false +# ## If set to true a unique ID hash will be sent as sha256(concat(timestamp,measurement,series-hash)) string +# ## it will enable data resend and update metric points avoiding duplicated metrics with diferent id's +# force_document_id = false + + +# # Send metrics to command as input over stdin +# [[outputs.exec]] +# ## Command to ingest metrics via stdin. +# command = ["tee", "-a", "/dev/null"] +# +# ## Timeout for command to complete. +# # timeout = "5s" +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" + + +# # Run executable as long-running output plugin +# [[outputs.execd]] +# ## Program to run as daemon +# command = ["my-telegraf-output", "--some-flag", "value"] +# +# ## Delay before the process is restarted after an unexpected termination +# restart_delay = "10s" +# +# ## Data format to export. 
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Send telegraf metrics to file(s) +# [[outputs.file]] +# ## Files to write to, "stdout" is a specially handled file. +# files = ["stdout", "/tmp/metrics.out"] +# +# ## Use batch serialization format instead of line based delimiting. The +# ## batch format allows for the production of non line based output formats and +# ## may more efficiently encode metric groups. +# # use_batch_format = false +# +# ## The file will be rotated after the time interval specified. When set +# ## to 0 no time based rotation is performed. +# # rotation_interval = "0d" +# +# ## The logfile will be rotated when it becomes larger than the specified +# ## size. When set to 0 no size based rotation is performed. +# # rotation_max_size = "0MB" +# +# ## Maximum number of rotated archives to keep, any older logs are deleted. +# ## If set to -1, no archives are removed. +# # rotation_max_archives = 5 +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Configuration for Graphite server to send metrics to +# [[outputs.graphite]] +# ## TCP endpoint for your graphite instance. +# ## If multiple endpoints are configured, output will be load balanced. +# ## Only one of the endpoints will be written to with each iteration. 
+# servers = ["localhost:2003"] +# ## Prefix metrics name +# prefix = "" +# ## Graphite output template +# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# template = "host.tags.measurement.field" +# +# ## Enable Graphite tags support +# # graphite_tag_support = false +# +# ## Define how metric names and tags are sanitized; options are "strict", or "compatible" +# ## strict - Default method, and backwards compatible with previous versionf of Telegraf +# ## compatible - More relaxed sanitizing when using tags, and compatible with the graphite spec +# # graphite_tag_sanitize_mode = "strict" +# +# ## Character for separating metric name and field for Graphite tags +# # graphite_separator = "." +# +# ## Graphite templates patterns +# ## 1. Template for cpu +# ## 2. Template for disk* +# ## 3. Default template +# # templates = [ +# # "cpu tags.measurement.host.field", +# # "disk* measurement.field", +# # "host.measurement.tags.field" +# #] +# +# ## timeout in seconds for the write connection to graphite +# timeout = 2 +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Send telegraf metrics to graylog +# [[outputs.graylog]] +# ## Endpoints for your graylog instances. +# servers = ["udp://127.0.0.1:12201"] +# +# ## Connection timeout. +# # timeout = "5s" +# +# ## The field to use as the GELF short_message, if unset the static string +# ## "telegraf" will be used. +# ## example: short_message_field = "message" +# # short_message_field = "" + + +# # Configurable HTTP health check resource based on metrics +# [[outputs.health]] +# ## Address and port to listen on. 
+# ## ex: service_address = "http://localhost:8080" +# ## service_address = "unix:///var/run/telegraf-health.sock" +# # service_address = "http://:8080" +# +# ## The maximum duration for reading the entire request. +# # read_timeout = "5s" +# ## The maximum duration for writing the entire response. +# # write_timeout = "5s" +# +# ## Username and password to accept for HTTP basic authentication. +# # basic_username = "user1" +# # basic_password = "secret" +# +# ## Allowed CA certificates for client certificates. +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## TLS server certificate and private key. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## One or more check sub-tables should be defined, it is also recommended to +# ## use metric filtering to limit the metrics that flow into this output. +# ## +# ## When using the default buffer sizes, this example will fail when the +# ## metric buffer is half full. +# ## +# ## namepass = ["internal_write"] +# ## tagpass = { output = ["influxdb"] } +# ## +# ## [[outputs.health.compares]] +# ## field = "buffer_size" +# ## lt = 5000.0 +# ## +# ## [[outputs.health.contains]] +# ## field = "buffer_size" + + +# # A plugin that can transmit metrics over HTTP +# [[outputs.http]] +# ## URL is the address to send metrics to +# url = "http://127.0.0.1:8080/telegraf" +# +# ## Timeout for HTTP message +# # timeout = "5s" +# +# ## HTTP method, one of: "POST" or "PUT" +# # method = "POST" +# +# ## HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## OAuth2 Client Credentials Grant +# # client_id = "clientid" +# # client_secret = "secret" +# # token_url = "https://indentityprovider/oauth2/v1/token" +# # scopes = ["urn:opc:idm:__myscopes__"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # 
insecure_skip_verify = false +# +# ## Optional Cookie authentication +# # cookie_auth_url = "https://localhost/authMe" +# # cookie_auth_method = "POST" +# # cookie_auth_username = "username" +# # cookie_auth_password = "pa$$word" +# # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' +# ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie +# # cookie_auth_renewal = "5m" +# +# ## Data format to output. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" +# +# ## HTTP Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "identity" +# +# ## Additional HTTP headers +# # [outputs.http.headers] +# # # Should be set manually to "application/json" for json data_format +# # Content-Type = "text/plain; charset=utf-8" +# +# ## Idle (keep-alive) connection timeout. +# ## Maximum amount of time before idle connection is closed. +# ## Zero means no limit. +# # idle_conn_timeout = 0 + + # # Configuration for sending metrics to InfluxDB # [[outputs.influxdb_v2]] # ## The URLs of the InfluxDB cluster nodes. @@ -190,7 +879,7 @@ # ## Multiple URLs can be specified for a single cluster, only ONE of the # ## urls will be written to each interval. # ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] -# urls = ["http://127.0.0.1:9999"] +# urls = ["http://127.0.0.1:8086"] # # ## Token for authentication. 
# token = "" @@ -236,188 +925,7970 @@ # # insecure_skip_verify = false -############################################################################### -# INPUT PLUGINS # -############################################################################### +# # Configuration for sending metrics to an Instrumental project +# [[outputs.instrumental]] +# ## Project API Token (required) +# api_token = "API Token" # required +# ## Prefix the metrics with a given name +# prefix = "" +# ## Stats output template (Graphite formatting) +# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite +# template = "host.tags.measurement.field" +# ## Timeout in seconds to connect +# timeout = "2s" +# ## Display Communication to Instrumental +# debug = false -# Windows Performance Counters plugin. -# These are the recommended method of monitoring system metrics on windows, -# as the regular system plugins (inputs.cpu, inputs.mem, etc.) rely on WMI, -# which utilize more system resources. -# -# See more configuration examples at: -# https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters - -[[inputs.win_perf_counters]] - [[inputs.win_perf_counters.object]] - # Processor usage, alternative to native, reports on a per core. - ObjectName = "Processor" - Instances = ["*"] - Counters = [ - "% Idle Time", - "% Interrupt Time", - "% Privileged Time", - "% User Time", - "% Processor Time", - "% DPC Time", - ] - Measurement = "win_cpu" - # Set to true to include _Total instance when querying for all (*). - IncludeTotal=true - - [[inputs.win_perf_counters.object]] - # Disk times and queues - ObjectName = "LogicalDisk" - Instances = ["*"] - Counters = [ - "% Idle Time", - "% Disk Time", - "% Disk Read Time", - "% Disk Write Time", - "% Free Space", - "Current Disk Queue Length", - "Free Megabytes", - ] - Measurement = "win_disk" - # Set to true to include _Total instance when querying for all (*). 
- #IncludeTotal=false - - [[inputs.win_perf_counters.object]] - ObjectName = "PhysicalDisk" - Instances = ["*"] - Counters = [ - "Disk Read Bytes/sec", - "Disk Write Bytes/sec", - "Current Disk Queue Length", - "Disk Reads/sec", - "Disk Writes/sec", - "% Disk Time", - "% Disk Read Time", - "% Disk Write Time", - ] - Measurement = "win_diskio" - - [[inputs.win_perf_counters.object]] - ObjectName = "Network Interface" - Instances = ["*"] - Counters = [ - "Bytes Received/sec", - "Bytes Sent/sec", - "Packets Received/sec", - "Packets Sent/sec", - "Packets Received Discarded", - "Packets Outbound Discarded", - "Packets Received Errors", - "Packets Outbound Errors", - ] - Measurement = "win_net" - - [[inputs.win_perf_counters.object]] - ObjectName = "System" - Counters = [ - "Context Switches/sec", - "System Calls/sec", - "Processor Queue Length", - "System Up Time", - ] - Instances = ["------"] - Measurement = "win_system" - # Set to true to include _Total instance when querying for all (*). - #IncludeTotal=false - - [[inputs.win_perf_counters.object]] - # Example query where the Instance portion must be removed to get data back, - # such as from the Memory object. - ObjectName = "Memory" - Counters = [ - "Available Bytes", - "Cache Faults/sec", - "Demand Zero Faults/sec", - "Page Faults/sec", - "Pages/sec", - "Transition Faults/sec", - "Pool Nonpaged Bytes", - "Pool Paged Bytes", - "Standby Cache Reserve Bytes", - "Standby Cache Normal Priority Bytes", - "Standby Cache Core Bytes", - ] - # Use 6 x - to remove the Instance bit from the query. - Instances = ["------"] - Measurement = "win_mem" - # Set to true to include _Total instance when querying for all (*). - #IncludeTotal=false - - [[inputs.win_perf_counters.object]] - # Example query where the Instance portion must be removed to get data back, - # such as from the Paging File object. 
- ObjectName = "Paging File" - Counters = [ - "% Usage", - ] - Instances = ["_Total"] - Measurement = "win_swap" - - -# Windows system plugins using WMI (disabled by default, using -# win_perf_counters over WMI is recommended) - - -# # Read metrics about cpu usage -# [[inputs.cpu]] -# ## Whether to report per-cpu stats or not -# percpu = true -# ## Whether to report total system cpu stats or not -# totalcpu = true -# ## If true, collect raw CPU time metrics. -# collect_cpu_time = false -# ## If true, compute and report the sum of all non-idle CPU states. -# report_active = false - - -# # Read metrics about disk usage by mount point -# [[inputs.disk]] -# ## By default stats will be gathered for all mount points. -# ## Set mount_points will restrict the stats to only the specified mount points. -# # mount_points = ["/"] -# -# ## Ignore mount points by filesystem type. -# ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"] - - -# # Read metrics about disk IO by device -# [[inputs.diskio]] -# ## By default, telegraf will gather stats for all devices including -# ## disk partitions. -# ## Setting devices will restrict the stats to the specified devices. -# # devices = ["sda", "sdb", "vd*"] -# ## Uncomment the following line if you need disk serial numbers. -# # skip_serial_number = false -# # -# ## On systems which support it, device metadata can be added in the form of -# ## tags. -# ## Currently only Linux is supported via udev properties. You can view -# ## available properties for a device by running: -# ## 'udevadm info -q property -n /dev/sda' -# # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] -# # -# ## Using the same metadata source as device_tags, you can also customize the -# ## name of the device via templates. -# ## The 'name_templates' parameter is a list of templates to try and apply to -# ## the device. The template may contain variables in the form of '$PROPERTY' or -# ## '${PROPERTY}'. 
The first template which does not contain any variables not -# ## present for the device is used as the device name tag. -# ## The typical use case is for LVM volumes, to get the VG/LV name instead of -# ## the near-meaningless DM-0 name. -# # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] - - -# # Read metrics about memory usage -# [[inputs.mem]] -# # no configuration +# # Configuration for the Kafka server to send metrics to +# [[outputs.kafka]] +# ## URLs of kafka brokers +# brokers = ["localhost:9092"] +# ## Kafka topic for producer messages +# topic = "telegraf" +# +# ## The value of this tag will be used as the topic. If not set the 'topic' +# ## option is used. +# # topic_tag = "" +# +# ## If true, the 'topic_tag' will be removed from to the metric. +# # exclude_topic_tag = false +# +# ## Optional Client id +# # client_id = "Telegraf" +# +# ## Set the minimal supported Kafka version. Setting this enables the use of new +# ## Kafka features and APIs. Of particular interest, lz4 compression +# ## requires at least version 0.10.0.0. +# ## ex: version = "1.1.0" +# # version = "" +# +# ## Optional topic suffix configuration. +# ## If the section is omitted, no suffix is used. +# ## Following topic suffix methods are supported: +# ## measurement - suffix equals to separator + measurement's name +# ## tags - suffix equals to separator + specified tags' values +# ## interleaved with separator +# +# ## Suffix equals to "_" + measurement name +# # [outputs.kafka.topic_suffix] +# # method = "measurement" +# # separator = "_" +# +# ## Suffix equals to "__" + measurement's "foo" tag value. +# ## If there's no such a tag, suffix equals to an empty string +# # [outputs.kafka.topic_suffix] +# # method = "tags" +# # keys = ["foo"] +# # separator = "__" +# +# ## Suffix equals to "_" + measurement's "foo" and "bar" +# ## tag values, separated by "_". If there is no such tags, +# ## their values treated as empty strings. 
+# # [outputs.kafka.topic_suffix]
+# # method = "tags"
+# # keys = ["foo", "bar"]
+# # separator = "_"
+#
+# ## The routing tag specifies a tagkey on the metric whose value is used as
+# ## the message key. The message key is used to determine which partition to
+# ## send the message to. This tag is preferred over the routing_key option.
+# routing_tag = "host"
+#
+# ## The routing key is set as the message key and used to determine which
+# ## partition to send the message to. This value is only used when no
+# ## routing_tag is set or as a fallback when the tag specified in routing tag
+# ## is not found.
+# ##
+# ## If set to "random", a random value will be generated for each message.
+# ##
+# ## When unset, no message key is added and each message is routed to a random
+# ## partition.
+# ##
+# ## ex: routing_key = "random"
+# ## routing_key = "telegraf"
+# # routing_key = ""
+#
+# ## Compression codec represents the various compression codecs recognized by
+# ## Kafka in messages.
+# ## 0 : None
+# ## 1 : Gzip
+# ## 2 : Snappy
+# ## 3 : LZ4
+# ## 4 : ZSTD
+# # compression_codec = 0
+#
+# ## Idempotent Writes
+# ## If enabled, exactly one copy of each message is written.
+# # idempotent_writes = false
+#
+# ## RequiredAcks is used in Produce Requests to tell the broker how many
+# ## replica acknowledgements it must see before responding
+# ## 0 : the producer never waits for an acknowledgement from the broker.
+# ## This option provides the lowest latency but the weakest durability
+# ## guarantees (some data will be lost when a server fails).
+# ## 1 : the producer gets an acknowledgement after the leader replica has
+# ## received the data. This option provides better durability as the
+# ## client waits until the server acknowledges the request as successful
+# ## (only messages that were written to the now-dead leader but not yet
+# ## replicated will be lost). 
+# ## -1: the producer gets an acknowledgement after all in-sync replicas have +# ## received the data. This option provides the best durability, we +# ## guarantee that no messages will be lost as long as at least one in +# ## sync replica remains. +# # required_acks = -1 +# +# ## The maximum number of times to retry sending a metric before failing +# ## until the next flush. +# # max_retry = 3 +# +# ## The maximum permitted size of a message. Should be set equal to or +# ## smaller than the broker's 'message.max.bytes'. +# # max_message_bytes = 1000000 +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional SASL Config +# # sasl_username = "kafka" +# # sasl_password = "secret" +# +# ## Optional SASL: +# ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI +# ## (defaults to PLAIN) +# # sasl_mechanism = "" +# +# ## used if sasl_mechanism is GSSAPI (experimental) +# # sasl_gssapi_service_name = "" +# # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH +# # sasl_gssapi_auth_type = "KRB5_USER_AUTH" +# # sasl_gssapi_kerberos_config_path = "/" +# # sasl_gssapi_realm = "realm" +# # sasl_gssapi_key_tab_path = "" +# # sasl_gssapi_disable_pafxfast = false +# +# ## used if sasl_mechanism is OAUTHBEARER (experimental) +# # sasl_access_token = "" +# +# ## SASL protocol version. When connecting to Azure EventHub set to 0. +# # sasl_version = 1 +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" -# # Read metrics about swap memory usage -# [[inputs.swap]] -# # no configuration +# # Configuration for the AWS Kinesis output. 
+# [[outputs.kinesis]] +# ## Amazon REGION of kinesis endpoint. +# region = "ap-southeast-2" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Kinesis StreamName must exist prior to starting telegraf. +# streamname = "StreamName" +# ## DEPRECATED: PartitionKey as used for sharding data. +# partitionkey = "PartitionKey" +# ## DEPRECATED: If set the partitionKey will be a random UUID on every put. +# ## This allows for scaling across multiple shards in a stream. +# ## This will cause issues with ordering. +# use_random_partitionkey = false +# ## The partition key can be calculated using one of several methods: +# ## +# ## Use a static value for all writes: +# # [outputs.kinesis.partition] +# # method = "static" +# # key = "howdy" +# # +# ## Use a random partition key on each write: +# # [outputs.kinesis.partition] +# # method = "random" +# # +# ## Use the measurement name as the partition key: +# # [outputs.kinesis.partition] +# # method = "measurement" +# # +# ## Use the value of a tag for all writes, if the tag is not set the empty +# ## default option will be used. 
When no default, defaults to "telegraf" +# # [outputs.kinesis.partition] +# # method = "tag" +# # key = "host" +# # default = "mykey" +# +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" +# +# ## debug will show upstream aws messages. +# debug = false + + +# # Configuration for Librato API to send metrics to. +# [[outputs.librato]] +# ## Librato API Docs +# ## http://dev.librato.com/v1/metrics-authentication +# ## Librato API user +# api_user = "telegraf@influxdb.com" # required. +# ## Librato API token +# api_token = "my-secret-token" # required. +# ## Debug +# # debug = false +# ## Connection timeout. +# # timeout = "5s" +# ## Output source Template (same as graphite buckets) +# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite +# ## This template is used in librato's source (not metric's name) +# template = "host" +# + + +# # Send aggregate metrics to Logz.io +# [[outputs.logzio]] +# ## Connection timeout, defaults to "5s" if not set. +# timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Logz.io account token +# token = "your logz.io token" # required +# +# ## Use your listener URL for your Logz.io account region. +# # url = "https://listener.logz.io:8071" + + +# # Send logs to Loki +# [[outputs.loki]] +# ## The domain of Loki +# domain = "https://loki.domain.tld" +# +# ## Endpoint to write api +# # endpoint = "/loki/api/v1/push" +# +# ## Connection timeout, defaults to "5s" if not set. 
+# # timeout = "5s" +# +# ## Basic auth credential +# # username = "loki" +# # password = "pass" +# +# ## Additional HTTP headers +# # http_headers = {"X-Scope-OrgID" = "1"} +# +# ## If the request must be gzip encoded +# # gzip_request = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" + + +# # Configuration for MQTT server to send metrics to +# [[outputs.mqtt]] +# servers = ["localhost:1883"] # required. +# +# ## MQTT outputs send metrics to this topic format +# ## "///" +# ## ex: prefix/web01.example.com/mem +# topic_prefix = "telegraf" +# +# ## QoS policy for messages +# ## 0 = at most once +# ## 1 = at least once +# ## 2 = exactly once +# # qos = 2 +# +# ## username and password to connect MQTT server. +# # username = "telegraf" +# # password = "metricsmetricsmetricsmetrics" +# +# ## client ID, if not set a random ID is generated +# # client_id = "" +# +# ## Timeout for write operations. default: 5s +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## When true, metrics will be sent in one MQTT message per flush. Otherwise, +# ## metrics are written one metric per MQTT message. +# # batch = false +# +# ## When true, metric will have RETAIN flag set, making broker cache entries until someone +# ## actually reads it +# # retain = false +# +# ## Defines the maximum length of time that the broker and client may not communicate. +# ## Defaults to 0 which turns the feature off. For version v2.0.12 of eclipse/mosquitto there is a +# ## [bug](https://github.com/eclipse/mosquitto/issues/2117) which requires keep_alive to be set. +# ## As a reference eclipse/paho.mqtt.golang v1.3.0 defaults to 30. +# # keep_alive = 0 +# +# ## Data format to output. 
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# data_format = "influx"
+
+
+# # Send telegraf measurements to NATS
+# [[outputs.nats]]
+# ## URLs of NATS servers
+# servers = ["nats://localhost:4222"]
+#
+# ## Optional client name
+# # name = ""
+#
+# ## Optional credentials
+# # username = ""
+# # password = ""
+#
+# ## Optional NATS 2.0 and NATS NGS compatible user credentials
+# # credentials = "/etc/telegraf/nats.creds"
+#
+# ## NATS subject for producer messages
+# subject = "telegraf"
+#
+# ## Use Transport Layer Security
+# # secure = false
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# data_format = "influx"
+
+
+# # Send metrics to New Relic metrics endpoint
+# [[outputs.newrelic]]
+# ## New Relic Insights API key
+# insights_key = "insights api key"
+#
+# ## Prefix to add to metric name for easy identification.
+# # metric_prefix = ""
+#
+# ## Timeout for writes to the New Relic API.
+# # timeout = "15s"
+#
+# ## HTTP Proxy override. If unset use values from the standard
+# ## proxy environment variables to determine proxy, if any.
+# # http_proxy = "http://corporate.proxy:3128"
+#
+# ## Metric URL override to enable geographic location endpoints. 
+# # If not set use values from the standard +# # metric_url = "https://metric-api.newrelic.com/metric/v1" + + +# # Send telegraf measurements to NSQD +# [[outputs.nsq]] +# ## Location of nsqd instance listening on TCP +# server = "localhost:4150" +# ## NSQ topic for producer messages +# topic = "telegraf" +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Send OpenTelemetry metrics over gRPC +# [[outputs.opentelemetry]] +# ## Override the default (localhost:4317) OpenTelemetry gRPC service +# ## address:port +# # service_address = "localhost:4317" +# +# ## Override the default (5s) request timeout +# # timeout = "5s" +# +# ## Optional TLS Config. +# ## +# ## Root certificates for verifying server certificates encoded in PEM format. +# # tls_ca = "/etc/telegraf/ca.pem" +# ## The public and private keypairs for the client encoded in PEM format. +# ## May contain intermediate certificates. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS, but skip TLS chain and host verification. +# # insecure_skip_verify = false +# ## Send the specified TLS server name via SNI. +# # tls_server_name = "foo.example.com" +# +# ## Override the default (gzip) compression used to send data. +# ## Supports: "gzip", "none" +# # compression = "gzip" +# +# ## Additional OpenTelemetry resource attributes +# # [outputs.opentelemetry.attributes] +# # "service.name" = "demo" +# +# ## Additional gRPC request metadata +# # [outputs.opentelemetry.headers] +# # key1 = "value1" + + +# # Configuration for OpenTSDB server to send metrics to +# [[outputs.opentsdb]] +# ## prefix for metrics keys +# prefix = "my.specific.prefix." 
+# +# ## DNS name of the OpenTSDB server +# ## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the +# ## telnet API. "http://opentsdb.example.com" will use the Http API. +# host = "opentsdb.example.com" +# +# ## Port of the OpenTSDB server +# port = 4242 +# +# ## Number of data points to send to OpenTSDB in Http requests. +# ## Not used with telnet API. +# http_batch_size = 50 +# +# ## URI Path for Http requests to OpenTSDB. +# ## Used in cases where OpenTSDB is located behind a reverse proxy. +# http_path = "/api/put" +# +# ## Debug true - Prints OpenTSDB communication +# debug = false +# +# ## Separator separates measurement name from field +# separator = "_" + + +# # Configuration for the Prometheus client to spawn +# [[outputs.prometheus_client]] +# ## Address to listen on +# listen = ":9273" +# +# ## Metric version controls the mapping from Telegraf metrics into +# ## Prometheus format. When using the prometheus input, use the same value in +# ## both plugins to ensure metrics are round-tripped without modification. +# ## +# ## example: metric_version = 1; +# ## metric_version = 2; recommended version +# # metric_version = 1 +# +# ## Use HTTP Basic Authentication. +# # basic_username = "Foo" +# # basic_password = "Bar" +# +# ## If set, the IP Ranges which are allowed to access metrics. +# ## ex: ip_range = ["192.168.0.0/24", "192.168.1.0/30"] +# # ip_range = [] +# +# ## Path to publish the metrics on. +# # path = "/metrics" +# +# ## Expiration interval for each metric. 0 == no expiration +# # expiration_interval = "60s" +# +# ## Collectors to enable, valid entries are "gocollector" and "process". +# ## If unset, both are enabled. +# # collectors_exclude = ["gocollector", "process"] +# +# ## Send string metrics as Prometheus labels. +# ## Unless set to false all string metrics will be sent as labels. +# # string_as_label = true +# +# ## If set, enable TLS with the given certificate. 
+# # tls_cert = "/etc/ssl/telegraf.crt" +# # tls_key = "/etc/ssl/telegraf.key" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Export metric collection time. +# # export_timestamp = false + + +# # Configuration for the Riemann server to send metrics to +# [[outputs.riemann]] +# ## The full TCP or UDP URL of the Riemann server +# url = "tcp://localhost:5555" +# +# ## Riemann event TTL, floating-point time in seconds. +# ## Defines how long that an event is considered valid for in Riemann +# # ttl = 30.0 +# +# ## Separator to use between measurement and field name in Riemann service name +# ## This does not have any effect if 'measurement_as_attribute' is set to 'true' +# separator = "/" +# +# ## Set measurement name as Riemann attribute 'measurement', instead of prepending it to the Riemann service name +# # measurement_as_attribute = false +# +# ## Send string metrics as Riemann event states. +# ## Unless enabled all string metrics will be ignored +# # string_as_state = false +# +# ## A list of tag keys whose values get sent as Riemann tags. +# ## If empty, all Telegraf tag values will be sent as tags +# # tag_keys = ["telegraf","custom_tag"] +# +# ## Additional Riemann tags to send. +# # tags = ["telegraf-output"] +# +# ## Description for Riemann event +# # description_text = "metrics collected from telegraf" +# +# ## Riemann client write timeout, defaults to "5s" if not set. 
+# # timeout = "5s"
+
+
+# # Configuration for the Riemann server to send metrics to
+# [[outputs.riemann_legacy]]
+# ## URL of server
+# url = "localhost:5555"
+# ## transport protocol to use either tcp or udp
+# transport = "tcp"
+# ## separator to use between input name and field name in Riemann service name
+# separator = " "
+
+
+# # Send aggregate metrics to Sensu Monitor
+# [[outputs.sensu]]
+# ## BACKEND API URL is the Sensu Backend API root URL to send metrics to
+# ## (protocol, host, and port only). The output plugin will automatically
+# ## append the corresponding backend API path
+# ## /api/core/v2/namespaces/:entity_namespace/events/:entity_name/:check_name).
+# ##
+# ## Backend Events API reference:
+# ## https://docs.sensu.io/sensu-go/latest/api/events/
+# ##
+# ## AGENT API URL is the Sensu Agent API root URL to send metrics to
+# ## (protocol, host, and port only). The output plugin will automatically
+# ## append the corresponding agent API path (/events).
+# ##
+# ## Agent API Events API reference:
+# ## https://docs.sensu.io/sensu-go/latest/api/events/
+# ##
+# ## NOTE: if backend_api_url and agent_api_url and api_key are set, the output
+# ## plugin will use backend_api_url. 
If backend_api_url and agent_api_url are +# ## not provided, the output plugin will default to use an agent_api_url of +# ## http://127.0.0.1:3031 +# ## +# # backend_api_url = "http://127.0.0.1:8080" +# # agent_api_url = "http://127.0.0.1:3031" +# +# ## API KEY is the Sensu Backend API token +# ## Generate a new API token via: +# ## +# ## $ sensuctl cluster-role create telegraf --verb create --resource events,entities +# ## $ sensuctl cluster-role-binding create telegraf --cluster-role telegraf --group telegraf +# ## $ sensuctl user create telegraf --group telegraf --password REDACTED +# ## $ sensuctl api-key grant telegraf +# ## +# ## For more information on Sensu RBAC profiles & API tokens, please visit: +# ## - https://docs.sensu.io/sensu-go/latest/reference/rbac/ +# ## - https://docs.sensu.io/sensu-go/latest/reference/apikeys/ +# ## +# # api_key = "${SENSU_API_KEY}" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Timeout for HTTP message +# # timeout = "5s" +# +# ## HTTP Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "identity" +# +# ## Sensu Event details +# ## +# ## Below are the event details to be sent to Sensu. The main portions of the +# ## event are the check, entity, and metrics specifications. 
For more information
+# ## on Sensu events and its components, please visit:
+# ## - Events - https://docs.sensu.io/sensu-go/latest/reference/events
+# ## - Checks - https://docs.sensu.io/sensu-go/latest/reference/checks
+# ## - Entities - https://docs.sensu.io/sensu-go/latest/reference/entities
+# ## - Metrics - https://docs.sensu.io/sensu-go/latest/reference/events#metrics
+# ##
+# ## Check specification
+# ## The check name is the name to give the Sensu check associated with the event
+# ## created. This maps to check.metadata.name in the event.
+# [outputs.sensu.check]
+# name = "telegraf"
+#
+# ## Entity specification
+# ## Configure the entity name and namespace, if necessary. This will be part of
+# ## the entity.metadata in the event.
+# ##
+# ## NOTE: if the output plugin is configured to send events to a
+# ## backend_api_url and entity_name is not set, the value returned by
+# ## os.Hostname() will be used; if the output plugin is configured to send
+# ## events to an agent_api_url, entity_name and entity_namespace are not used.
+# # [outputs.sensu.entity]
+# # name = "server-01"
+# # namespace = "default"
+#
+# ## Metrics specification
+# ## Configure the tags for the metrics that are sent as part of the Sensu event
+# # [outputs.sensu.tags]
+# # source = "telegraf"
+#
+# ## Configure the handler(s) for processing the provided metrics
+# # [outputs.sensu.metrics]
+# # handlers = ["influxdb","elasticsearch"]
+
+
+# # Send metrics and events to SignalFx
+# [[outputs.signalfx]]
+# ## SignalFx Org Access Token
+# access_token = "my-secret-token"
+#
+# ## The SignalFx realm that your organization resides in
+# signalfx_realm = "us9" # Required if ingest_url is not set
+#
+# ## You can optionally provide a custom ingest url instead of the
+# ## signalfx_realm option above if you are using a gateway or proxy
+# ## instance. This option takes precedence over signalfx_realm. 
+# ingest_url = "https://my-custom-ingest/" +# +# ## Event typed metrics are omitted by default, +# ## If you require an event typed metric you must specify the +# ## metric name in the following list. +# included_event_names = ["plugin.metric_name"] + + +# # Generic socket writer capable of handling multiple socket types. +# [[outputs.socket_writer]] +# ## URL to connect to +# # address = "tcp://127.0.0.1:8094" +# # address = "tcp://example.com:http" +# # address = "tcp4://127.0.0.1:8094" +# # address = "tcp6://127.0.0.1:8094" +# # address = "tcp6://[2001:db8::1]:8094" +# # address = "udp://127.0.0.1:8094" +# # address = "udp4://127.0.0.1:8094" +# # address = "udp6://127.0.0.1:8094" +# # address = "unix:///tmp/telegraf.sock" +# # address = "unixgram:///tmp/telegraf.sock" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Period between keep alive probes. +# ## Only applies to TCP sockets. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration. +# # keep_alive_period = "5m" +# +# ## Content encoding for packet-based connections (i.e. UDP, unixgram). +# ## Can be set to "gzip" or to "identity" to apply no encoding. +# ## +# # content_encoding = "identity" +# +# ## Data format to generate. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# # data_format = "influx" + + +# # Send metrics to SQL Database +# [[outputs.sql]] +# ## Database driver +# ## Valid options: mssql (Microsoft SQL Server), mysql (MySQL), pgx (Postgres), +# ## sqlite (SQLite3), snowflake (snowflake.com) +# # driver = "" +# +# ## Data source name +# ## The format of the data source name is different for each database driver. 
+# ## See the plugin readme for details. +# # data_source_name = "" +# +# ## Timestamp column name +# # timestamp_column = "timestamp" +# +# ## Table creation template +# ## Available template variables: +# ## {TABLE} - table name as a quoted identifier +# ## {TABLELITERAL} - table name as a quoted string literal +# ## {COLUMNS} - column definitions (list of quoted identifiers and types) +# # table_template = "CREATE TABLE {TABLE}({COLUMNS})" +# +# ## Table existence check template +# ## Available template variables: +# ## {TABLE} - tablename as a quoted identifier +# # table_exists_template = "SELECT 1 FROM {TABLE} LIMIT 1" +# +# ## Initialization SQL +# # init_sql = "" +# +# ## Metric type to SQL type conversion +# #[outputs.sql.convert] +# # integer = "INT" +# # real = "DOUBLE" +# # text = "TEXT" +# # timestamp = "TIMESTAMP" +# # defaultvalue = "TEXT" +# # unsigned = "UNSIGNED" + + +# # Configuration for Google Cloud Stackdriver to send metrics to +# [[outputs.stackdriver]] +# ## GCP Project +# project = "erudite-bloom-151019" +# +# ## The namespace for the metric descriptor +# namespace = "telegraf" +# +# ## Custom resource type +# # resource_type = "generic_node" +# +# ## Additional resource labels +# # [outputs.stackdriver.resource_labels] +# # node_id = "$HOSTNAME" +# # namespace = "myapp" +# # location = "eu-north0" + + +# # A plugin that can transmit metrics to Sumo Logic HTTP Source +# [[outputs.sumologic]] +# ## Unique URL generated for your HTTP Metrics Source. +# ## This is the address to send metrics to. +# # url = "https://events.sumologic.net/receiver/v1/http/" +# +# ## Data format to be used for sending metrics. +# ## This will set the "Content-Type" header accordingly. 
+# ## Currently supported formats: +# ## * graphite - for Content-Type of application/vnd.sumologic.graphite +# ## * carbon2 - for Content-Type of application/vnd.sumologic.carbon2 +# ## * prometheus - for Content-Type of application/vnd.sumologic.prometheus +# ## +# ## More information can be found at: +# ## https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source#content-type-headers-for-metrics +# ## +# ## NOTE: +# ## When unset, telegraf will by default use the influx serializer which is currently unsupported +# ## in HTTP Source. +# data_format = "carbon2" +# +# ## Timeout used for HTTP request +# # timeout = "5s" +# +# ## Max HTTP request body size in bytes before compression (if applied). +# ## By default 1MB is recommended. +# ## NOTE: +# ## Bear in mind that in some serializer a metric even though serialized to multiple +# ## lines cannot be split any further so setting this very low might not work +# ## as expected. +# # max_request_body_size = 1000000 +# +# ## Additional, Sumo specific options. +# ## Full list can be found here: +# ## https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source#supported-http-headers +# +# ## Desired source name. +# ## Useful if you want to override the source name configured for the source. +# # source_name = "" +# +# ## Desired host name. +# ## Useful if you want to override the source host configured for the source. +# # source_host = "" +# +# ## Desired source category. +# ## Useful if you want to override the source category configured for the source. +# # source_category = "" +# +# ## Comma-separated key=value list of dimensions to apply to every metric. +# ## Custom dimensions will allow you to query your metrics at a more granular level. 
+# # dimensions = "" + + +# # Configuration for Syslog server to send metrics to +# [[outputs.syslog]] +# ## URL to connect to +# ## ex: address = "tcp://127.0.0.1:8094" +# ## ex: address = "tcp4://127.0.0.1:8094" +# ## ex: address = "tcp6://127.0.0.1:8094" +# ## ex: address = "tcp6://[2001:db8::1]:8094" +# ## ex: address = "udp://127.0.0.1:8094" +# ## ex: address = "udp4://127.0.0.1:8094" +# ## ex: address = "udp6://127.0.0.1:8094" +# address = "tcp://127.0.0.1:8094" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Period between keep alive probes. +# ## Only applies to TCP sockets. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration. +# # keep_alive_period = "5m" +# +# ## The framing technique with which it is expected that messages are +# ## transported (default = "octet-counting"). Whether the messages come +# ## using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), +# ## or the non-transparent framing technique (RFC6587#section-3.4.2). Must +# ## be one of "octet-counting", "non-transparent". +# # framing = "octet-counting" +# +# ## The trailer to be expected in case of non-transparent framing (default = "LF"). +# ## Must be one of "LF", or "NUL". +# # trailer = "LF" +# +# ## SD-PARAMs settings +# ## Syslog messages can contain key/value pairs within zero or more +# ## structured data sections. For each unrecognized metric tag/field an +# ## SD-PARAM is created. 
+# ## +# ## Example: +# ## [[outputs.syslog]] +# ## sdparam_separator = "_" +# ## default_sdid = "default@32473" +# ## sdids = ["foo@123", "bar@456"] +# ## +# ## input => xyzzy,x=y foo@123_value=42,bar@456_value2=84,something_else=1 +# ## output (structured data only) => [foo@123 value=42][bar@456 value2=84][default@32473 something_else=1 x=y] +# +# ## SD-PARAMs separator between the sdid and tag/field key (default = "_") +# # sdparam_separator = "_" +# +# ## Default sdid used for tags/fields that don't contain a prefix defined in +# ## the explicit sdids setting below If no default is specified, no SD-PARAMs +# ## will be used for unrecognized field. +# # default_sdid = "default@32473" +# +# ## List of explicit prefixes to extract from tag/field keys and use as the +# ## SDID, if they match (see above example for more details): +# # sdids = ["foo@123", "bar@456"] +# +# ## Default severity value. Severity and Facility are used to calculate the +# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field +# ## with key "severity_code" is defined. If unset, 5 (notice) is the default +# # default_severity_code = 5 +# +# ## Default facility value. Facility and Severity are used to calculate the +# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field with +# ## key "facility_code" is defined. If unset, 1 (user-level) is the default +# # default_facility_code = 1 +# +# ## Default APP-NAME value (RFC5424#section-6.2.5) +# ## Used when no metric tag with key "appname" is defined. +# ## If unset, "Telegraf" is the default +# # default_appname = "Telegraf" + + +# # Configuration for Amazon Timestream output. 
+# [[outputs.timestream]] +# ## Amazon Region +# region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order: +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Timestream database where the metrics will be inserted. +# ## The database must exist prior to starting Telegraf. +# database_name = "yourDatabaseNameHere" +# +# ## Specifies if the plugin should describe the Timestream database upon starting +# ## to validate if it has access necessary permissions, connection, etc., as a safety check. +# ## If the describe operation fails, the plugin will not start +# ## and therefore the Telegraf agent will not start. +# describe_database_on_start = false +# +# ## The mapping mode specifies how Telegraf records are represented in Timestream. +# ## Valid values are: single-table, multi-table. +# ## For example, consider the following data in line protocol format: +# ## weather,location=us-midwest,season=summer temperature=82,humidity=71 1465839830100400200 +# ## airquality,location=us-west no2=5,pm25=16 1465839830100400200 +# ## where weather and airquality are the measurement names, location and season are tags, +# ## and temperature, humidity, no2, pm25 are fields. 
+# ## In multi-table mode: +# ## - first line will be ingested to table named weather +# ## - second line will be ingested to table named airquality +# ## - the tags will be represented as dimensions +# ## - first table (weather) will have two records: +# ## one with measurement name equals to temperature, +# ## another with measurement name equals to humidity +# ## - second table (airquality) will have two records: +# ## one with measurement name equals to no2, +# ## another with measurement name equals to pm25 +# ## - the Timestream tables from the example will look like this: +# ## TABLE "weather": +# ## time | location | season | measure_name | measure_value::bigint +# ## 2016-06-13 17:43:50 | us-midwest | summer | temperature | 82 +# ## 2016-06-13 17:43:50 | us-midwest | summer | humidity | 71 +# ## TABLE "airquality": +# ## time | location | measure_name | measure_value::bigint +# ## 2016-06-13 17:43:50 | us-west | no2 | 5 +# ## 2016-06-13 17:43:50 | us-west | pm25 | 16 +# ## In single-table mode: +# ## - the data will be ingested to a single table, which name will be valueOf(single_table_name) +# ## - measurement name will stored in dimension named valueOf(single_table_dimension_name_for_telegraf_measurement_name) +# ## - location and season will be represented as dimensions +# ## - temperature, humidity, no2, pm25 will be represented as measurement name +# ## - the Timestream table from the example will look like this: +# ## Assuming: +# ## - single_table_name = "my_readings" +# ## - single_table_dimension_name_for_telegraf_measurement_name = "namespace" +# ## TABLE "my_readings": +# ## time | location | season | namespace | measure_name | measure_value::bigint +# ## 2016-06-13 17:43:50 | us-midwest | summer | weather | temperature | 82 +# ## 2016-06-13 17:43:50 | us-midwest | summer | weather | humidity | 71 +# ## 2016-06-13 17:43:50 | us-west | NULL | airquality | no2 | 5 +# ## 2016-06-13 17:43:50 | us-west | NULL | airquality | pm25 | 16 +# ## In most 
cases, using multi-table mapping mode is recommended. +# ## However, you can consider using single-table in situations when you have thousands of measurement names. +# mapping_mode = "multi-table" +# +# ## Only valid and required for mapping_mode = "single-table" +# ## Specifies the Timestream table where the metrics will be uploaded. +# # single_table_name = "yourTableNameHere" +# +# ## Only valid and required for mapping_mode = "single-table" +# ## Describes what will be the Timestream dimension name for the Telegraf +# ## measurement name. +# # single_table_dimension_name_for_telegraf_measurement_name = "namespace" +# +# ## Specifies if the plugin should create the table, if the table do not exist. +# ## The plugin writes the data without prior checking if the table exists. +# ## When the table does not exist, the error returned from Timestream will cause +# ## the plugin to create the table, if this parameter is set to true. +# create_table_if_not_exists = true +# +# ## Only valid and required if create_table_if_not_exists = true +# ## Specifies the Timestream table magnetic store retention period in days. +# ## Check Timestream documentation for more details. +# create_table_magnetic_store_retention_period_in_days = 365 +# +# ## Only valid and required if create_table_if_not_exists = true +# ## Specifies the Timestream table memory store retention period in hours. +# ## Check Timestream documentation for more details. +# create_table_memory_store_retention_period_in_hours = 24 +# +# ## Only valid and optional if create_table_if_not_exists = true +# ## Specifies the Timestream table tags. +# ## Check Timestream documentation for more details +# # create_table_tags = { "foo" = "bar", "environment" = "dev"} + + +# # Write metrics to Warp 10 +# [[outputs.warp10]] +# # Prefix to add to the measurement. +# prefix = "telegraf." 
+# +# # URL of the Warp 10 server +# warp_url = "http://localhost:8080" +# +# # Write token to access your app on warp 10 +# token = "Token" +# +# # Warp 10 query timeout +# # timeout = "15s" +# +# ## Print Warp 10 error body +# # print_error_body = false +# +# ## Max string error size +# # max_string_error_size = 511 +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Configuration for Wavefront server to send metrics to +# [[outputs.wavefront]] +# ## Url for Wavefront Direct Ingestion or using HTTP with Wavefront Proxy +# ## If using Wavefront Proxy, also specify port. example: http://proxyserver:2878 +# url = "https://metrics.wavefront.com" +# +# ## Authentication Token for Wavefront. Only required if using Direct Ingestion +# #token = "DUMMY_TOKEN" +# +# ## DNS name of the wavefront proxy server. Do not use if url is specified +# #host = "wavefront.example.com" +# +# ## Port that the Wavefront proxy server listens on. Do not use if url is specified +# #port = 2878 +# +# ## prefix for metrics keys +# #prefix = "my.specific.prefix." +# +# ## whether to use "value" for name of simple fields. default is false +# #simple_fields = false +# +# ## character to use between metric and field name. default is . (dot) +# #metric_separator = "." +# +# ## Convert metric name paths to use metricSeparator character +# ## When true will convert all _ (underscore) characters in final metric name. default is true +# #convert_paths = true +# +# ## Use Strict rules to sanitize metric and tag names from invalid characters +# ## When enabled forward slash (/) and comma (,) will be accepted +# #use_strict = false +# +# ## Use Regex to sanitize metric and tag names from invalid characters +# ## Regex is more thorough, but significantly slower. 
default is false +# #use_regex = false +# +# ## point tags to use as the source name for Wavefront (if none found, host will be used) +# #source_override = ["hostname", "address", "agent_host", "node_host"] +# +# ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default is true +# #convert_bool = true +# +# ## Truncate metric tags to a total of 254 characters for the tag name value. Wavefront will reject any +# ## data point exceeding this limit if not truncated. Defaults to 'false' to provide backwards compatibility. +# #truncate_tags = false +# +# ## Flush the internal buffers after each batch. This effectively bypasses the background sending of metrics +# ## normally done by the Wavefront SDK. This can be used if you are experiencing buffer overruns. The sending +# ## of metrics will block for a longer time, but this will be handled gracefully by the internal buffering in +# ## Telegraf. +# #immediate_flush = true +# +# ## Define a mapping, namespaced by metric prefix, from string values to numeric values +# ## deprecated in 1.9; use the enum processor plugin +# #[[outputs.wavefront.string_to_number.elasticsearch]] +# # green = 1.0 +# # yellow = 0.5 +# # red = 0.0 + + +# # Generic WebSocket output writer. +# [[outputs.websocket]] +# ## URL is the address to send metrics to. Make sure ws or wss scheme is used. +# url = "ws://127.0.0.1:8080/telegraf" +# +# ## Timeouts (make sure read_timeout is larger than server ping interval or set to zero). +# # connect_timeout = "30s" +# # write_timeout = "30s" +# # read_timeout = "30s" +# +# ## Optionally turn on using text data frames (binary by default). +# # use_text_frames = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Data format to output. 
+# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" +# +# ## Additional HTTP Upgrade headers +# # [outputs.websocket.headers] +# # Authorization = "Bearer " + + +# # Send aggregated metrics to Yandex.Cloud Monitoring +# [[outputs.yandex_cloud_monitoring]] +# ## Timeout for HTTP writes. +# # timeout = "20s" +# +# ## Yandex.Cloud monitoring API endpoint. Normally should not be changed +# # endpoint_url = "https://monitoring.api.cloud.yandex.net/monitoring/v2/data/write" +# +# ## All user metrics should be sent with "custom" service specified. Normally should not be changed +# # service = "custom" + + +############################################################################### +# PROCESSOR PLUGINS # +############################################################################### + + +# # Attach AWS EC2 metadata to metrics +# [[processors.aws_ec2]] +# ## Instance identity document tags to attach to metrics. +# ## For more information see: +# ## https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html +# ## +# ## Available tags: +# ## * accountId +# ## * architecture +# ## * availabilityZone +# ## * billingProducts +# ## * imageId +# ## * instanceId +# ## * instanceType +# ## * kernelId +# ## * pendingTime +# ## * privateIp +# ## * ramdiskId +# ## * region +# ## * version +# imds_tags = [] +# +# ## EC2 instance tags retrieved with DescribeTags action. +# ## In case tag is empty upon retrieval it's omitted when tagging metrics. +# ## Note that in order for this to work, role attached to EC2 instance or AWS +# ## credentials available from the environment must have a policy attached, that +# ## allows ec2:DescribeTags. 
+# ## +# ## For more information see: +# ## https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTags.html +# ec2_tags = [] +# +# ## Timeout for http requests made by against aws ec2 metadata endpoint. +# timeout = "10s" +# +# ## ordered controls whether or not the metrics need to stay in the same order +# ## this plugin received them in. If false, this plugin will change the order +# ## with requests hitting cached results moving through immediately and not +# ## waiting on slower lookups. This may cause issues for you if you are +# ## depending on the order of metrics staying the same. If so, set this to true. +# ## Keeping the metrics ordered may be slightly slower. +# ordered = false +# +# ## max_parallel_calls is the maximum number of AWS API calls to be in flight +# ## at the same time. +# ## It's probably best to keep this number fairly low. +# max_parallel_calls = 10 + + +# # Clone metrics and apply modifications. +# [[processors.clone]] +# ## All modifications on inputs and aggregators can be overridden: +# # name_override = "new_name" +# # name_prefix = "new_name_prefix" +# # name_suffix = "new_name_suffix" +# +# ## Tags to be added (all values must be strings) +# # [processors.clone.tags] +# # additional_tag = "tag_value" + + +# # Convert values to another metric value type +# [[processors.converter]] +# ## Tags to convert +# ## +# ## The table key determines the target type, and the array of key-values +# ## select the keys to convert. The array may contain globs. +# ## = [...] +# [processors.converter.tags] +# measurement = [] +# string = [] +# integer = [] +# unsigned = [] +# boolean = [] +# float = [] +# +# ## Fields to convert +# ## +# ## The table key determines the target type, and the array of key-values +# ## select the keys to convert. The array may contain globs. +# ## = [...] 
+# [processors.converter.fields] +# measurement = [] +# tag = [] +# string = [] +# integer = [] +# unsigned = [] +# boolean = [] +# float = [] + + +# # Dates measurements, tags, and fields that pass through this filter. +# [[processors.date]] +# ## New tag to create +# tag_key = "month" +# +# ## New field to create (cannot set both field_key and tag_key) +# # field_key = "month" +# +# ## Date format string, must be a representation of the Go "reference time" +# ## which is "Mon Jan 2 15:04:05 -0700 MST 2006". +# date_format = "Jan" +# +# ## If destination is a field, date format can also be one of +# ## "unix", "unix_ms", "unix_us", or "unix_ns", which will insert an integer field. +# # date_format = "unix" +# +# ## Offset duration added to the date string when writing the new tag. +# # date_offset = "0s" +# +# ## Timezone to use when creating the tag or field using a reference time +# ## string. This can be set to one of "UTC", "Local", or to a location name +# ## in the IANA Time Zone database. +# ## example: timezone = "America/Los_Angeles" +# # timezone = "UTC" + + +# # Filter metrics with repeating field values +# [[processors.dedup]] +# ## Maximum time to suppress output +# dedup_interval = "600s" + + +# # Defaults sets default value(s) for specified fields that are not set on incoming metrics. +# [[processors.defaults]] +# ## Ensures a set of fields always exists on your metric(s) with their +# ## respective default value. +# ## For any given field pair (key = default), if it's not set, a field +# ## is set on the metric with the specified default. +# ## +# ## A field is considered not set if it is nil on the incoming metric; +# ## or it is not nil but its value is an empty string or is a string +# ## of one or more spaces. +# ## = +# # [processors.defaults.fields] +# # field_1 = "bar" +# # time_idle = 0 +# # is_error = true + + +# # Map enum values according to given table. 
+# [[processors.enum]] +# [[processors.enum.mapping]] +# ## Name of the field to map. Globs accepted. +# field = "status" +# +# ## Name of the tag to map. Globs accepted. +# # tag = "status" +# +# ## Destination tag or field to be used for the mapped value. By default the +# ## source tag or field is used, overwriting the original value. +# dest = "status_code" +# +# ## Default value to be used for all values not contained in the mapping +# ## table. When unset, the unmodified value for the field will be used if no +# ## match is found. +# # default = 0 +# +# ## Table of mappings +# [processors.enum.mapping.value_mappings] +# green = 1 +# amber = 2 +# red = 3 + + +# # Run executable as long-running processor plugin +# [[processors.execd]] +# ## Program to run as daemon +# ## eg: command = ["/path/to/your_program", "arg1", "arg2"] +# command = ["cat"] +# +# ## Delay before the process is restarted after an unexpected termination +# restart_delay = "10s" + + +# # Performs file path manipulations on tags and fields +# [[processors.filepath]] +# ## Treat the tag value as a path and convert it to its last element, storing the result in a new tag +# # [[processors.filepath.basename]] +# # tag = "path" +# # dest = "basepath" +# +# ## Treat the field value as a path and keep all but the last element of path, typically the path's directory +# # [[processors.filepath.dirname]] +# # field = "path" +# +# ## Treat the tag value as a path, converting it to its last element without its suffix +# # [[processors.filepath.stem]] +# # tag = "path" +# +# ## Treat the tag value as a path, converting it to the shortest path name equivalent +# ## to path by purely lexical processing +# # [[processors.filepath.clean]] +# # tag = "path" +# +# ## Treat the tag value as a path, converting it to a relative path that is lexically +# ## equivalent to the source path when joined to 'base_path' +# # [[processors.filepath.rel]] +# # tag = "path" +# # base_path = "/var/log" +# +# ## Treat the 
tag value as a path, replacing each separator character in path with a '/' character. Has only +# ## effect on Windows +# # [[processors.filepath.toslash]] +# # tag = "path" + + +# # Add a tag of the network interface name looked up over SNMP by interface number +# [[processors.ifname]] +# ## Name of tag holding the interface number +# # tag = "ifIndex" +# +# ## Name of output tag where service name will be added +# # dest = "ifName" +# +# ## Name of tag of the SNMP agent to request the interface name from +# # agent = "agent" +# +# ## Timeout for each request. +# # timeout = "5s" +# +# ## SNMP version; can be 1, 2, or 3. +# # version = 2 +# +# ## SNMP community string. +# # community = "public" +# +# ## Number of retries to attempt. +# # retries = 3 +# +# ## The GETBULK max-repetitions parameter. +# # max_repetitions = 10 +# +# ## SNMPv3 authentication and encryption options. +# ## +# ## Security Name. +# # sec_name = "myuser" +# ## Authentication protocol; one of "MD5", "SHA", or "". +# # auth_protocol = "MD5" +# ## Authentication password. +# # auth_password = "pass" +# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". +# # sec_level = "authNoPriv" +# ## Context Name. +# # context_name = "" +# ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "". +# # priv_protocol = "" +# ## Privacy password used for encrypted messages. +# # priv_password = "" +# +# ## max_parallel_lookups is the maximum number of SNMP requests to +# ## make at the same time. +# # max_parallel_lookups = 100 +# +# ## ordered controls whether or not the metrics need to stay in the +# ## same order this plugin received them in. If false, this plugin +# ## may change the order when data is cached. If you need metrics to +# ## stay in order set this to true. keeping the metrics ordered may +# ## be slightly slower +# # ordered = false +# +# ## cache_ttl is the amount of time interface names are cached for a +# ## given agent. 
After this period elapses if names are needed they +# ## will be retrieved again. +# # cache_ttl = "8h" + + +# # Apply metric modifications using override semantics. +# [[processors.override]] +# ## All modifications on inputs and aggregators can be overridden: +# # name_override = "new_name" +# # name_prefix = "new_name_prefix" +# # name_suffix = "new_name_suffix" +# +# ## Tags to be added (all values must be strings) +# # [processors.override.tags] +# # additional_tag = "tag_value" + + +# # Parse a value in a specified field/tag(s) and add the result in a new metric +# [[processors.parser]] +# ## The name of the fields whose value will be parsed. +# parse_fields = [] +# +# ## If true, incoming metrics are not emitted. +# drop_original = false +# +# ## If set to override, emitted metrics will be merged by overriding the +# ## original metric using the newly parsed metrics. +# merge = "override" +# +# ## The dataformat to be read from files +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Rotate a single valued metric into a multi field metric +# [[processors.pivot]] +# ## Tag to use for naming the new field. +# tag_key = "name" +# ## Field to use as the value of the new field. 
+# value_key = "value" + + +# # Given a tag/field of a TCP or UDP port number, add a tag/field of the service name looked up in the system services file +# [[processors.port_name]] +# ## Name of tag holding the port number +# # tag = "port" +# ## Or name of the field holding the port number +# # field = "port" +# +# ## Name of output tag or field (depending on the source) where service name will be added +# # dest = "service" +# +# ## Default tcp or udp +# # default_protocol = "tcp" +# +# ## Tag containing the protocol (tcp or udp, case-insensitive) +# # protocol_tag = "proto" +# +# ## Field containing the protocol (tcp or udp, case-insensitive) +# # protocol_field = "proto" + + +# # Print all metrics that pass through this filter. +# [[processors.printer]] + + +# # Transforms tag and field values with regex pattern +# [[processors.regex]] +# ## Tag and field conversions defined in separate sub-tables +# # [[processors.regex.tags]] +# # ## Tag to change +# # key = "resp_code" +# # ## Regular expression to match on a tag value +# # pattern = "^(\\d)\\d\\d$" +# # ## Matches of the pattern will be replaced with this string. Use ${1} +# # ## notation to use the text of the first submatch. +# # replacement = "${1}xx" +# +# # [[processors.regex.fields]] +# # ## Field to change +# # key = "request" +# # ## All the power of the Go regular expressions available here +# # ## For example, named subgroups +# # pattern = "^/api(?P<method>[\\w/]+)\\S*" +# # replacement = "${method}" +# # ## If result_key is present, a new field will be created +# # ## instead of changing existing field +# # result_key = "method" +# +# ## Multiple conversions may be applied for one field sequentially +# ## Let's extract one more value +# # [[processors.regex.fields]] +# # key = "request" +# # pattern = ".*category=(\\w+).*" +# # replacement = "${1}" +# # result_key = "search_category" + + +# # Rename measurements, tags, and fields that pass through this filter. 
+# [[processors.rename]] + + +# # ReverseDNS does a reverse lookup on IP addresses to retrieve the DNS name +# [[processors.reverse_dns]] +# ## For optimal performance, you may want to limit which metrics are passed to this +# ## processor. eg: +# ## namepass = ["my_metric_*"] +# +# ## cache_ttl is how long the dns entries should stay cached for. +# ## generally longer is better, but if you expect a large number of diverse lookups +# ## you'll want to consider memory use. +# cache_ttl = "24h" +# +# ## lookup_timeout is how long you should wait for a single dns request to respond. +# ## this is also the maximum acceptable latency for a metric travelling through +# ## the reverse_dns processor. After lookup_timeout is exceeded, a metric will +# ## be passed on unaltered. +# ## multiple simultaneous resolution requests for the same IP will only make a +# ## single rDNS request, and they will all wait for the answer for this long. +# lookup_timeout = "3s" +# +# ## max_parallel_lookups is the maximum number of dns requests to be in flight +# ## at the same time. Requests hitting cached values do not count against this +# ## total, and neither do multiple requests for the same IP. +# ## It's probably best to keep this number fairly low. +# max_parallel_lookups = 10 +# +# ## ordered controls whether or not the metrics need to stay in the same order +# ## this plugin received them in. If false, this plugin will change the order +# ## with requests hitting cached results moving through immediately and not +# ## waiting on slower lookups. This may cause issues for you if you are +# ## depending on the order of metrics staying the same. If so, set this to true. +# ## keeping the metrics ordered may be slightly slower. 
+# ordered = false +# +# [[processors.reverse_dns.lookup]] +# ## get the ip from the field "source_ip", and put the result in the field "source_name" +# field = "source_ip" +# dest = "source_name" +# +# [[processors.reverse_dns.lookup]] +# ## get the ip from the tag "destination_ip", and put the result in the tag +# ## "destination_name". +# tag = "destination_ip" +# dest = "destination_name" +# +# ## If you would prefer destination_name to be a field instead, you can use a +# ## processors.converter after this one, specifying the order attribute. + + +# # Add the S2 Cell ID as a tag based on latitude and longitude fields +# [[processors.s2geo]] +# ## The name of the lat and lon fields containing WGS-84 latitude and +# ## longitude in decimal degrees. +# # lat_field = "lat" +# # lon_field = "lon" +# +# ## New tag to create +# # tag_key = "s2_cell_id" +# +# ## Cell level (see https://s2geometry.io/resources/s2cell_statistics.html) +# # cell_level = 9 + + +# # Process metrics using a Starlark script +# [[processors.starlark]] +# ## The Starlark source can be set as a string in this configuration file, or +# ## by referencing a file containing the script. Only one source or script +# ## should be set at once. +# ## +# ## Source of the Starlark script. +# source = ''' +# def apply(metric): +# return metric +# ''' +# +# ## File containing a Starlark script. +# # script = "/usr/local/bin/myscript.star" +# +# ## The constants of the Starlark script. 
+# # [processors.starlark.constants] +# # max_size = 10 +# # threshold = 0.75 +# # default_name = "Julia" +# # debug_mode = true + + +# # Perform string processing on tags, fields, and measurements +# [[processors.strings]] +# ## Convert a tag value to uppercase +# # [[processors.strings.uppercase]] +# # tag = "method" +# +# ## Convert a field value to lowercase and store in a new field +# # [[processors.strings.lowercase]] +# # field = "uri_stem" +# # dest = "uri_stem_normalised" +# +# ## Convert a field value to titlecase +# # [[processors.strings.titlecase]] +# # field = "status" +# +# ## Trim leading and trailing whitespace using the default cutset +# # [[processors.strings.trim]] +# # field = "message" +# +# ## Trim leading characters in cutset +# # [[processors.strings.trim_left]] +# # field = "message" +# # cutset = "\t" +# +# ## Trim trailing characters in cutset +# # [[processors.strings.trim_right]] +# # field = "message" +# # cutset = "\r\n" +# +# ## Trim the given prefix from the field +# # [[processors.strings.trim_prefix]] +# # field = "my_value" +# # prefix = "my_" +# +# ## Trim the given suffix from the field +# # [[processors.strings.trim_suffix]] +# # field = "read_count" +# # suffix = "_count" +# +# ## Replace all non-overlapping instances of old with new +# # [[processors.strings.replace]] +# # measurement = "*" +# # old = ":" +# # new = "_" +# +# ## Trims strings based on width +# # [[processors.strings.left]] +# # field = "message" +# # width = 10 +# +# ## Decode a base64 encoded utf-8 string +# # [[processors.strings.base64decode]] +# # field = "message" +# +# ## Sanitize a string to ensure it is a valid utf-8 string +# ## Each run of invalid UTF-8 byte sequences is replaced by the replacement string, which may be empty +# # [[processors.strings.valid_utf8]] +# # field = "message" +# # replacement = "" + + +# # Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit. 
+# [[processors.tag_limit]]
+# ## Maximum number of tags to preserve
+# limit = 10
+#
+# ## List of tags to preferentially preserve
+# keep = ["foo", "bar", "baz"]
+
+
+# # Uses a Go template to create a new tag
+# [[processors.template]]
+# ## Tag to set with the output of the template.
+# tag = "topic"
+#
+# ## Go template used to create the tag value. In order to ease TOML
+# ## escaping requirements, you may wish to use single quotes around the
+# ## template string.
+# template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}'
+
+
+# # Filter metrics, returning only the top k series over a period of time.
+# [[processors.topk]]
+# ## How many seconds between aggregations
+# # period = 10
+#
+# ## How many top metrics to return
+# # k = 10
+#
+# ## Over which tags should the aggregation be done. Globs can be specified, in
+# ## which case any tag matching the glob will be aggregated over. If set to an
+# ## empty list, no aggregation over tags is done
+# # group_by = ['*']
+#
+# ## Over which fields the top k are calculated
+# # fields = ["value"]
+#
+# ## What aggregation to use. Options: sum, mean, min, max
+# # aggregation = "mean"
+#
+# ## Instead of the top k largest metrics, return the bottom k lowest metrics
+# # bottomk = false
+#
+# ## The plugin assigns each metric a GroupBy tag generated from its name and
+# ## tags. If this setting is different than "" the plugin will add a
+# ## tag (which name will be the value of this setting) to each metric with
+# ## the value of the calculated GroupBy tag. Useful for debugging
+# # add_groupby_tag = ""
+#
+# ## These settings provide a way to know the position of each metric in
+# ## the top k. The 'add_rank_field' setting allows to specify for which
+# ## fields the position is required. If the list is non empty, then a field
+# ## will be added to each and every metric for each string present in this
+# ## setting. This field will contain the ranking of the group that
+# ## the metric belonged to when aggregated over that field.
+# ## The name of the field will be set to the name of the aggregation field, +# ## suffixed with the string '_topk_rank' +# # add_rank_fields = [] +# +# ## These settings provide a way to know what values the plugin is generating +# ## when aggregating metrics. The 'add_aggregate_field' setting allows to +# ## specify for which fields the final aggregation value is required. If the +# ## list is non empty, then a field will be added to each every metric for +# ## each field present in this setting. This field will contain +# ## the computed aggregation for the group that the metric belonged to when +# ## aggregated over that field. +# ## The name of the field will be set to the name of the aggregation field, +# ## suffixed with the string '_topk_aggregate' +# # add_aggregate_fields = [] + + +# # Rotate multi field metric into several single field metrics +# [[processors.unpivot]] +# ## Tag to use for the name. +# tag_key = "name" +# ## Field to use for the name of the value. +# value_key = "value" + + +############################################################################### +# AGGREGATOR PLUGINS # +############################################################################### + + +# # Keep the aggregate basicstats of each metric passing through. +# [[aggregators.basicstats]] +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# +# ## Configures which basic stats to push as fields +# # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"] + + +# # Calculates a derivative for every field. +# [[aggregators.derivative]] +# ## The period in which to flush the aggregator. +# period = "30s" +# ## +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. 
+# drop_original = false +# ## +# ## This aggregator will estimate a derivative for each field, which is +# ## contained in both the first and last metric of the aggregation interval. +# ## Without further configuration the derivative will be calculated with +# ## respect to the time difference between these two measurements in seconds. +# ## The formula applied is for every field: +# ## +# ## value_last - value_first +# ## derivative = -------------------------- +# ## time_difference_in_seconds +# ## +# ## The resulting derivative will be named *fieldname_rate*. The suffix +# ## "_rate" can be configured by the *suffix* parameter. When using a +# ## derivation variable you can include its name for more clarity. +# # suffix = "_rate" +# ## +# ## As an abstraction the derivative can be calculated not only by the time +# ## difference but by the difference of a field, which is contained in the +# ## measurement. This field is assumed to be monotonously increasing. This +# ## feature is used by specifying a *variable*. +# ## Make sure the specified variable is not filtered and exists in the metrics +# ## passed to this aggregator! +# # variable = "" +# ## +# ## When using a field as the derivation parameter the name of that field will +# ## be used for the resulting derivative, e.g. *fieldname_by_parameter*. +# ## +# ## Note, that the calculation is based on the actual timestamp of the +# ## measurements. When there is only one measurement during that period, the +# ## measurement will be rolled over to the next period. The maximum number of +# ## such roll-overs can be configured with a default of 10. +# # max_roll_over = 10 +# ## + + +# # Report the final metric of a series +# [[aggregators.final]] +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. 
+# drop_original = false +# +# ## The time that a series is not updated until considering it final. +# series_timeout = "5m" + + +# # Create aggregate histograms. +# [[aggregators.histogram]] +# ## The period in which to flush the aggregator. +# period = "30s" +# +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# +# ## If true, the histogram will be reset on flush instead +# ## of accumulating the results. +# reset = false +# +# ## Whether bucket values should be accumulated. If set to false, "gt" tag will be added. +# ## Defaults to true. +# cumulative = true +# +# ## Example config that aggregates all fields of the metric. +# # [[aggregators.histogram.config]] +# # ## Right borders of buckets (with +Inf implicitly added). +# # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0] +# # ## The name of metric. +# # measurement_name = "cpu" +# +# ## Example config that aggregates only specific fields of the metric. +# # [[aggregators.histogram.config]] +# # ## Right borders of buckets (with +Inf implicitly added). +# # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0] +# # ## The name of metric. +# # measurement_name = "diskio" +# # ## The concrete fields of metric +# # fields = ["io_time", "read_time", "write_time"] + + +# # Merge metrics into multifield metrics by series key +# [[aggregators.merge]] +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = true + + +# # Keep the aggregate min/max of each metric passing through. +# [[aggregators.minmax]] +# ## General Aggregator Arguments: +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. 
+# drop_original = false + + +# # Keep the aggregate quantiles of each metric passing through. +# [[aggregators.quantile]] +# ## General Aggregator Arguments: +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# +# ## Quantiles to output in the range [0,1] +# # quantiles = [0.25, 0.5, 0.75] +# +# ## Type of aggregation algorithm +# ## Supported are: +# ## "t-digest" -- approximation using centroids, can cope with large number of samples +# ## "exact R7" -- exact computation also used by Excel or NumPy (Hyndman & Fan 1996 R7) +# ## "exact R8" -- exact computation (Hyndman & Fan 1996 R8) +# ## NOTE: Do not use "exact" algorithms with large number of samples +# ## to not impair performance or memory consumption! +# # algorithm = "t-digest" +# +# ## Compression for approximation (t-digest). The value needs to be +# ## greater or equal to 1.0. Smaller values will result in more +# ## performance but less accuracy. +# # compression = 100.0 + + +# # Count the occurrence of values in fields. +# [[aggregators.valuecounter]] +# ## General Aggregator Arguments: +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. 
+# drop_original = false +# ## The fields for which the values will be counted +# fields = [] + + +############################################################################### +# INPUT PLUGINS # +############################################################################### + + +# Read metrics about cpu usage +[[inputs.cpu]] + ## Whether to report per-cpu stats or not + percpu = true + ## Whether to report total system cpu stats or not + totalcpu = true + ## If true, collect raw CPU time metrics + collect_cpu_time = false + ## If true, compute and report the sum of all non-idle CPU states + report_active = false + + +# Read metrics about disk usage by mount point +[[inputs.disk]] + ## By default stats will be gathered for all mount points. + ## Set mount_points will restrict the stats to only the specified mount points. + # mount_points = ["/"] + + ## Ignore mount points by filesystem type. + ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"] + + +# Read metrics about disk IO by device +[[inputs.diskio]] + ## By default, telegraf will gather stats for all devices including + ## disk partitions. + ## Setting devices will restrict the stats to the specified devices. + # devices = ["sda", "sdb", "vd*"] + ## Uncomment the following line if you need disk serial numbers. + # skip_serial_number = false + # + ## On systems which support it, device metadata can be added in the form of + ## tags. + ## Currently only Linux is supported via udev properties. You can view + ## available properties for a device by running: + ## 'udevadm info -q property -n /dev/sda' + ## Note: Most, but not all, udev properties can be accessed this way. Properties + ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH. + # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] + # + ## Using the same metadata source as device_tags, you can also customize the + ## name of the device via templates. 
+ ## The 'name_templates' parameter is a list of templates to try and apply to + ## the device. The template may contain variables in the form of '$PROPERTY' or + ## '${PROPERTY}'. The first template which does not contain any variables not + ## present for the device is used as the device name tag. + ## The typical use case is for LVM volumes, to get the VG/LV name instead of + ## the near-meaningless DM-0 name. + # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] + + +# Get kernel statistics from /proc/stat +[[inputs.kernel]] + # no configuration + + +# Read metrics about memory usage +[[inputs.mem]] + # no configuration + + +# Get the number of processes and group them by status +[[inputs.processes]] + # no configuration + + +# Read metrics about swap memory usage +[[inputs.swap]] + # no configuration + + +# Read metrics about system load & uptime +[[inputs.system]] + ## Uncomment to remove deprecated metrics. + # fielddrop = ["uptime_format"] + + +# # Gather ActiveMQ metrics +# [[inputs.activemq]] +# ## ActiveMQ WebConsole URL +# url = "http://127.0.0.1:8161" +# +# ## Required ActiveMQ Endpoint +# ## deprecated in 1.11; use the url option +# # server = "127.0.0.1" +# # port = 8161 +# +# ## Credentials for basic HTTP authentication +# # username = "admin" +# # password = "admin" +# +# ## Required ActiveMQ webadmin root path +# # webadmin = "admin" +# +# ## Maximum time to receive response. +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read stats from aerospike server(s) +# [[inputs.aerospike]] +# ## Aerospike servers to connect to (with port) +# ## This plugin will query all namespaces the aerospike +# ## server has configured and get stats for them. 
+# servers = ["localhost:3000"] +# +# # username = "telegraf" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # enable_tls = false +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## If false, skip chain & host verification +# # insecure_skip_verify = true +# +# # Feature Options +# # Add namespace variable to limit the namespaces executed on +# # Leave blank to do all +# # disable_query_namespaces = true # default false +# # namespaces = ["namespace1", "namespace2"] +# +# # Enable set level telemetry +# # query_sets = true # default: false +# # Add namespace set combinations to limit sets executed on +# # Leave blank to do all sets +# # sets = ["namespace1/set1", "namespace1/set2", "namespace3"] +# +# # Histograms +# # enable_ttl_histogram = true # default: false +# # enable_object_size_linear_histogram = true # default: false +# +# # by default, aerospike produces a 100 bucket histogram +# # this is not great for most graphing tools, this will allow +# # the ability to squash this to a smaller number of buckets +# # To have a balanced histogram, the number of buckets chosen +# # should divide evenly into 100. +# # num_histogram_buckets = 100 # default: 10 + + +# # Query statistics from AMD Graphics cards using rocm-smi binary +# [[inputs.amd_rocm_smi]] +# ## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath +# # bin_path = "/opt/rocm/bin/rocm-smi" +# +# ## Optional: timeout for GPU polling +# # timeout = "5s" + + +# # Read Apache status information (mod_status) +# [[inputs.apache]] +# ## An array of URLs to gather from, must be directed at the machine +# ## readable version of the mod_status page including the auto query string. +# ## Default is "http://localhost/server-status?auto". +# urls = ["http://localhost/server-status?auto"] +# +# ## Credentials for basic HTTP authentication. 
+# # username = "myuser" +# # password = "mypassword" +# +# ## Maximum time to receive response. +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Monitor APC UPSes connected to apcupsd +# [[inputs.apcupsd]] +# # A list of running apcupsd server to connect to. +# # If not provided will default to tcp://127.0.0.1:3551 +# servers = ["tcp://127.0.0.1:3551"] +# +# ## Timeout for dialing server. +# timeout = "5s" + + +# # Gather metrics from Apache Aurora schedulers +# [[inputs.aurora]] +# ## Schedulers are the base addresses of your Aurora Schedulers +# schedulers = ["http://127.0.0.1:8081"] +# +# ## Set of role types to collect metrics from. +# ## +# ## The scheduler roles are checked each interval by contacting the +# ## scheduler nodes; zookeeper is not contacted. +# # roles = ["leader", "follower"] +# +# ## Timeout is the max time for total network operations. +# # timeout = "5s" +# +# ## Username and password are sent using HTTP Basic Auth. 
+# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Gather Azure Storage Queue metrics +# [[inputs.azure_storage_queue]] +# ## Required Azure Storage Account name +# account_name = "mystorageaccount" +# +# ## Required Azure Storage Account access key +# account_key = "storageaccountaccesskey" +# +# ## Set to false to disable peeking age of oldest message (executes faster) +# # peek_oldest_message_age = true + + +# # Collects Beanstalkd server and tubes stats +# [[inputs.beanstalkd]] +# ## Server to collect data from +# server = "localhost:11300" +# +# ## List of tubes to gather stats about. +# ## If no tubes specified then data gathered for each tube on server reported by list-tubes command +# tubes = ["notifications"] + + +# # Read metrics exposed by Beat +# [[inputs.beat]] +# ## An URL from which to read Beat-formatted JSON +# ## Default is "http://127.0.0.1:5066". +# url = "http://127.0.0.1:5066" +# +# ## Enable collection of the listed stats +# ## An empty list means collect all. Available options are currently +# ## "beat", "libbeat", "system" and "filebeat". 
+# # include = ["beat", "libbeat", "filebeat"] +# +# ## HTTP method +# # method = "GET" +# +# ## Optional HTTP headers +# # headers = {"X-Special-Header" = "Special-Value"} +# +# ## Override HTTP "Host" header +# # host_header = "logstash.example.com" +# +# ## Timeout for HTTP requests +# # timeout = "5s" +# +# ## Optional HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read BIND nameserver XML statistics +# [[inputs.bind]] +# ## An array of BIND XML statistics URI to gather stats. +# ## Default is "http://localhost:8053/xml/v3". +# # urls = ["http://localhost:8053/xml/v3"] +# # gather_memory_contexts = false +# # gather_views = false +# +# ## Timeout for http requests made by bind nameserver +# # timeout = "4s" + + +# # Collect bond interface status, slaves statuses and failures count +# [[inputs.bond]] +# ## Sets 'proc' directory path +# ## If not specified, then default is /proc +# # host_proc = "/proc" +# +# ## By default, telegraf gather stats for all bond interfaces +# ## Setting interfaces will restrict the stats to the specified +# ## bond interfaces. +# # bond_interfaces = ["bond0"] + + +# # Collect Kafka topics and consumers status from Burrow HTTP API. +# [[inputs.burrow]] +# ## Burrow API endpoints in format "schema://host:port". +# ## Default is "http://localhost:8000". +# servers = ["http://localhost:8000"] +# +# ## Override Burrow API prefix. +# ## Useful when Burrow is behind reverse-proxy. +# # api_prefix = "/v3/kafka" +# +# ## Maximum time to receive response. +# # response_timeout = "5s" +# +# ## Limit per-server concurrent connections. +# ## Useful in case of large number of topics or consumer groups. 
+# # concurrent_connections = 20 +# +# ## Filter clusters, default is no filtering. +# ## Values can be specified as glob patterns. +# # clusters_include = [] +# # clusters_exclude = [] +# +# ## Filter consumer groups, default is no filtering. +# ## Values can be specified as glob patterns. +# # groups_include = [] +# # groups_exclude = [] +# +# ## Filter topics, default is no filtering. +# ## Values can be specified as glob patterns. +# # topics_include = [] +# # topics_exclude = [] +# +# ## Credentials for basic HTTP authentication. +# # username = "" +# # password = "" +# +# ## Optional SSL config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# # insecure_skip_verify = false + + +# # Collects performance metrics from the MON, OSD, MDS and RGW nodes in a Ceph storage cluster. +# [[inputs.ceph]] +# ## This is the recommended interval to poll. Too frequent and you will lose +# ## data points due to timeouts during rebalancing and recovery +# interval = '1m' +# +# ## All configuration values are optional, defaults are shown below +# +# ## location of ceph binary +# ceph_binary = "/usr/bin/ceph" +# +# ## directory in which to look for socket files +# socket_dir = "/var/run/ceph" +# +# ## prefix of MON and OSD socket files, used to determine socket type +# mon_prefix = "ceph-mon" +# osd_prefix = "ceph-osd" +# mds_prefix = "ceph-mds" +# rgw_prefix = "ceph-client" +# +# ## suffix used to identify socket files +# socket_suffix = "asok" +# +# ## Ceph user to authenticate as, ceph will search for the corresponding keyring +# ## e.g. client.admin.keyring in /etc/ceph, or the explicit path defined in the +# ## client section of ceph.conf for example: +# ## +# ## [client.telegraf] +# ## keyring = /etc/ceph/client.telegraf.keyring +# ## +# ## Consult the ceph documentation for more detail on keyring generation. 
+# ceph_user = "client.admin" +# +# ## Ceph configuration to use to locate the cluster +# ceph_config = "/etc/ceph/ceph.conf" +# +# ## Whether to gather statistics via the admin socket +# gather_admin_socket_stats = true +# +# ## Whether to gather statistics via ceph commands, requires ceph_user and ceph_config +# ## to be specified +# gather_cluster_stats = false + + +# # Read specific statistics per cgroup +# [[inputs.cgroup]] +# ## Directories in which to look for files, globs are supported. +# ## Consider restricting paths to the set of cgroups you really +# ## want to monitor if you have a large number of cgroups, to avoid +# ## any cardinality issues. +# # paths = [ +# # "/sys/fs/cgroup/memory", +# # "/sys/fs/cgroup/memory/child1", +# # "/sys/fs/cgroup/memory/child2/*", +# # ] +# ## cgroup stat fields, as file names, globs are supported. +# ## these file names are appended to each path from above. +# # files = ["memory.*usage*", "memory.limit_in_bytes"] + + +# # Get standard chrony metrics, requires chronyc executable. +# [[inputs.chrony]] +# ## If true, chronyc tries to perform a DNS lookup for the time server. 
+# # dns_lookup = false + + +# # Pull Metric Statistics from Amazon CloudWatch +# [[inputs.cloudwatch]] +# ## Amazon Region +# region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# # access_key = "" +# # secret_key = "" +# # token = "" +# # role_arn = "" +# # web_identity_token_file = "" +# # role_session_name = "" +# # profile = "" +# # shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Set http_proxy (telegraf uses the system wide proxy settings if it's is not set) +# # http_proxy_url = "http://localhost:8888" +# +# # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all +# # metrics are made available to the 1 minute period. Some are collected at +# # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring. +# # Note that if a period is configured that is smaller than the minimum for a +# # particular metric, that metric will not be returned by the Cloudwatch API +# # and will not be collected by Telegraf. 
+# # +# ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s) +# period = "5m" +# +# ## Collection Delay (required - must account for metrics availability via CloudWatch API) +# delay = "5m" +# +# ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid +# ## gaps or overlap in pulled data +# interval = "5m" +# +# ## Recommended if "delay" and "period" are both within 3 hours of request time. Invalid values will be ignored. +# ## Recently Active feature will only poll for CloudWatch ListMetrics values that occurred within the last 3 Hours. +# ## If enabled, it will reduce total API usage of the CloudWatch ListMetrics API and require less memory to retain. +# ## Do not enable if "period" or "delay" is longer than 3 hours, as it will not return data more than 3 hours old. +# ## See https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_ListMetrics.html +# #recently_active = "PT3H" +# +# ## Configure the TTL for the internal cache of metrics. +# # cache_ttl = "1h" +# +# ## Metric Statistic Namespaces (required) +# namespaces = ["AWS/ELB"] +# # A single metric statistic namespace that will be appended to namespaces on startup +# # namespace = "AWS/ELB" +# +# ## Maximum requests per second. Note that the global default AWS rate limit is +# ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a +# ## maximum of 50. +# ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html +# # ratelimit = 25 +# +# ## Timeout for http requests made by the cloudwatch client. +# # timeout = "5s" +# +# ## Namespace-wide statistic filters. These allow fewer queries to be made to +# ## cloudwatch. 
+# # statistic_include = [ "average", "sum", "minimum", "maximum", sample_count" ] +# # statistic_exclude = [] +# +# ## Metrics to Pull +# ## Defaults to all Metrics in Namespace if nothing is provided +# ## Refreshes Namespace available metrics every 1h +# #[[inputs.cloudwatch.metrics]] +# # names = ["Latency", "RequestCount"] +# # +# # ## Statistic filters for Metric. These allow for retrieving specific +# # ## statistics for an individual metric. +# # # statistic_include = [ "average", "sum", "minimum", "maximum", sample_count" ] +# # # statistic_exclude = [] +# # +# # ## Dimension filters for Metric. All dimensions defined for the metric names +# # ## must be specified in order to retrieve the metric statistics. +# # ## 'value' has wildcard / 'glob' matching support such as 'p-*'. +# # [[inputs.cloudwatch.metrics.dimensions]] +# # name = "LoadBalancerName" +# # value = "p-example" + + +# # Gather health check statuses from services registered in Consul +# [[inputs.consul]] +# ## Consul server address +# # address = "localhost:8500" +# +# ## URI scheme for the Consul server, one of "http", "https" +# # scheme = "http" +# +# ## Metric version controls the mapping from Consul metrics into +# ## Telegraf metrics. +# ## +# ## example: metric_version = 1; deprecated in 1.15 +# ## metric_version = 2; recommended version +# # metric_version = 1 +# +# ## ACL token used in every request +# # token = "" +# +# ## HTTP Basic Authentication username and password. 
+# # username = "" +# # password = "" +# +# ## Data center to query the health checks from +# # datacenter = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = true +# +# ## Consul checks' tag splitting +# # When tags are formatted like "key:value" with ":" as a delimiter then +# # they will be splitted and reported as proper key:value in Telegraf +# # tag_delimiter = ":" + + +# # Read per-node and per-bucket metrics from Couchbase +# [[inputs.couchbase]] +# ## specify servers via a url matching: +# ## [protocol://][:password]@address[:port] +# ## e.g. +# ## http://couchbase-0.example.com/ +# ## http://admin:secret@couchbase-0.example.com:8091/ +# ## +# ## If no servers are specified, then localhost is used as the host. +# ## If no protocol is specified, HTTP is used. +# ## If no port is specified, 8091 is used. +# servers = ["http://localhost:8091"] +# +# ## Filter bucket fields to include only here. +# # bucket_stats_included = ["quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification (defaults to false) +# ## If set to false, tls_cert and tls_key are required +# # insecure_skip_verify = false + + +# # Read CouchDB Stats from one or more servers +# [[inputs.couchdb]] +# ## Works with CouchDB stats endpoints out of the box +# ## Multiple Hosts from which to read CouchDB stats: +# hosts = ["http://localhost:8086/_stats"] +# +# ## Use HTTP Basic Authentication. 
+# # basic_username = "telegraf" +# # basic_password = "p@ssw0rd" + + +# # Fetch metrics from a CSGO SRCDS +# [[inputs.csgo]] +# ## Specify servers using the following format: +# ## servers = [ +# ## ["ip1:port1", "rcon_password1"], +# ## ["ip2:port2", "rcon_password2"], +# ## ] +# # +# ## If no servers are specified, no data will be collected +# servers = [] + + +# # Input plugin for DC/OS metrics +# [[inputs.dcos]] +# ## The DC/OS cluster URL. +# cluster_url = "https://dcos-ee-master-1" +# +# ## The ID of the service account. +# service_account_id = "telegraf" +# ## The private key file for the service account. +# service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem" +# +# ## Path containing login token. If set, will read on every gather. +# # token_file = "/home/dcos/.dcos/token" +# +# ## In all filter options if both include and exclude are empty all items +# ## will be collected. Arrays may contain glob patterns. +# ## +# ## Node IDs to collect metrics from. If a node is excluded, no metrics will +# ## be collected for its containers or apps. +# # node_include = [] +# # node_exclude = [] +# ## Container IDs to collect container metrics from. +# # container_include = [] +# # container_exclude = [] +# ## Container IDs to collect app metrics from. +# # app_include = [] +# # app_exclude = [] +# +# ## Maximum concurrent connections to the cluster. +# # max_connections = 10 +# ## Maximum time to receive a response from cluster. +# # response_timeout = "20s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## If false, skip chain & host verification +# # insecure_skip_verify = true +# +# ## Recommended filtering to reduce series cardinality. +# # [inputs.dcos.tagdrop] +# # path = ["/var/lib/mesos/slave/slaves/*"] + + +# # Read metrics from one or many disque servers +# [[inputs.disque]] +# ## An array of URI to gather stats about. 
Specify an ip or hostname +# ## with optional port and password. +# ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc. +# ## If no servers are specified, then localhost is used as the host. +# servers = ["localhost"] + + +# # Provide a native collection for dmsetup based statistics for dm-cache +# [[inputs.dmcache]] +# ## Whether to report per-device stats or not +# per_device = true + + +# # Query given DNS server and gives statistics +# [[inputs.dns_query]] +# ## servers to query +# servers = ["8.8.8.8"] +# +# ## Network is the network protocol name. +# # network = "udp" +# +# ## Domains or subdomains to query. +# # domains = ["."] +# +# ## Query record type. +# ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. +# # record_type = "A" +# +# ## Dns server port. +# # port = 53 +# +# ## Query timeout in seconds. +# # timeout = 2 + + +# # Read metrics about docker containers +# [[inputs.docker]] +# ## Docker Endpoint +# ## To use TCP, set endpoint = "tcp://[ip]:[port]" +# ## To use environment variables (ie, docker-machine), set endpoint = "ENV" +# endpoint = "unix:///var/run/docker.sock" +# +# ## Set to true to collect Swarm metrics(desired_replicas, running_replicas) +# gather_services = false +# +# ## Only collect metrics for these containers, collect all if empty +# container_names = [] +# +# ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars +# source_tag = false +# +# ## Containers to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all containers +# container_name_include = [] +# container_name_exclude = [] +# +# ## Container states to include and exclude. Globs accepted. +# ## When empty only containers in the "running" state will be captured. 
+# ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] +# ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] +# # container_state_include = [] +# # container_state_exclude = [] +# +# ## Timeout for docker list, info, and stats commands +# timeout = "5s" +# +# ## Whether to report for each container per-device blkio (8:0, 8:1...), +# ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not. +# ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'. +# ## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting +# ## is honored. +# perdevice = true +# +# ## Specifies for which classes a per-device metric should be issued +# ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...) +# ## Please note that this setting has no effect if 'perdevice' is set to 'true' +# # perdevice_include = ["cpu"] +# +# ## Whether to report for each container total blkio and network stats or not. +# ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'. +# ## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting +# ## is honored. +# total = false +# +# ## Specifies for which classes a total metric should be issued. Total is an aggregated of the 'perdevice' values. +# ## Possible values are 'cpu', 'blkio' and 'network' +# ## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin. 
+# ## Please note that this setting has no effect if 'total' is set to 'false' +# # total_include = ["cpu", "blkio", "network"] +# +# ## Which environment variables should we use as a tag +# ##tag_env = ["JAVA_HOME", "HEAP_SIZE"] +# +# ## docker labels to include and exclude as tags. Globs accepted. +# ## Note that an empty array for both will include all labels as tags +# docker_label_include = [] +# docker_label_exclude = [] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read statistics from one or many dovecot servers +# [[inputs.dovecot]] +# ## specify dovecot servers via an address:port list +# ## e.g. +# ## localhost:24242 +# ## +# ## If no servers are specified, then localhost is used as the host. +# servers = ["localhost:24242"] +# +# ## Type is one of "user", "domain", "ip", or "global" +# type = "global" +# +# ## Wildcard matches like "*.com". An empty string "" is same as "*" +# ## If type = "ip" filters should be +# filters = [""] + + +# # Read metrics about docker containers from Fargate/ECS v2, v3 meta endpoints. +# [[inputs.ecs]] +# ## ECS metadata url. +# ## Metadata v2 API is used if set explicitly. Otherwise, +# ## v3 metadata endpoint API is used if available. +# # endpoint_url = "" +# +# ## Containers to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all containers +# # container_name_include = [] +# # container_name_exclude = [] +# +# ## Container states to include and exclude. Globs accepted. +# ## When empty only containers in the "RUNNING" state will be captured. +# ## Possible values are "NONE", "PULLED", "CREATED", "RUNNING", +# ## "RESOURCES_PROVISIONED", "STOPPED". +# # container_status_include = [] +# # container_status_exclude = [] +# +# ## ecs labels to include and exclude as tags. Globs accepted. 
+# ## Note that an empty array for both will include all labels as tags
+# ecs_label_include = [ "com.amazonaws.ecs.*" ]
+# ecs_label_exclude = []
+#
+# ## Timeout for queries.
+# # timeout = "5s"
+
+
+# # Read stats from one or more Elasticsearch servers or clusters
+# [[inputs.elasticsearch]]
+# ## specify a list of one or more Elasticsearch servers
+# # you can add username and password to your url to use basic authentication:
+# # servers = ["http://user:pass@localhost:9200"]
+# servers = ["http://localhost:9200"]
+#
+# ## Timeout for HTTP requests to the elastic search server(s)
+# http_timeout = "5s"
+#
+# ## When local is true (the default), the node will read only its own stats.
+# ## Set local to false when you want to read the node stats from all nodes
+# ## of the cluster.
+# local = true
+#
+# ## Set cluster_health to true when you want to also obtain cluster health stats
+# cluster_health = false
+#
+# ## Adjust cluster_health_level when you want to also obtain detailed health stats
+# ## The options are
+# ## - indices (default)
+# ## - cluster
+# # cluster_health_level = "indices"
+#
+# ## Set cluster_stats to true when you want to also obtain cluster stats.
+# cluster_stats = false
+#
+# ## Only gather cluster_stats from the master node. To work this requires local = true
+# cluster_stats_only_from_master = true
+#
+# ## Indices to collect; can be one or more indices names or _all
+# ## Use of wildcards is allowed. Use a wildcard at the end to retrieve index names that end with a changing value, like a date.
+# indices_include = ["_all"]
+#
+# ## One of "shards", "cluster", "indices"
+# indices_level = "shards"
+#
+# ## node_stats is a list of sub-stats that you want to have gathered. Valid options
+# ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
+# ## "breaker". By default, all stats are gathered.
+# # node_stats = ["jvm", "http"]
+#
+# ## HTTP Basic Authentication username and password.
+# # username = "" +# # password = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Sets the number of most recent indices to return for indices that are configured with a date-stamped suffix. +# ## Each 'indices_include' entry ending with a wildcard (*) or glob matching pattern will group together all indices that match it, and sort them +# ## by the date or number after the wildcard. Metrics then are gathered for only the 'num_most_recent_indices' amount of most recent indices. +# # num_most_recent_indices = 0 + + +# # Derive metrics from aggregating Elasticsearch query results +# [[inputs.elasticsearch_query]] +# ## The full HTTP endpoint URL for your Elasticsearch instance +# ## Multiple urls can be specified as part of the same cluster, +# ## this means that only ONE of the urls will be written to each interval. +# urls = [ "http://node1.es.example.com:9200" ] # required. +# +# ## Elasticsearch client timeout, defaults to "5s". +# # timeout = "5s" +# +# ## Set to true to ask Elasticsearch a list of all cluster nodes, +# ## thus it is not necessary to list all nodes in the urls config option +# # enable_sniffer = false +# +# ## Set the interval to check if the Elasticsearch nodes are available +# ## This option is only used if enable_sniffer is also set (0s to disable it) +# # health_check_interval = "10s" +# +# ## HTTP basic authentication details (eg. 
when using x-pack) +# # username = "telegraf" +# # password = "mypassword" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# [[inputs.elasticsearch_query.aggregation]] +# ## measurement name for the results of the aggregation query +# measurement_name = "measurement" +# +# ## Elasticsearch indexes to query (accept wildcards). +# index = "index-*" +# +# ## The date/time field in the Elasticsearch index (mandatory). +# date_field = "@timestamp" +# +# ## If the field used for the date/time field in Elasticsearch is also using +# ## a custom date/time format it may be required to provide the format to +# ## correctly parse the field. +# ## +# ## If using one of the built in elasticsearch formats this is not required. +# # date_field_custom_format = "" +# +# ## Time window to query (eg. "1m" to query documents from last minute). +# ## Normally should be set to same as collection interval +# query_period = "1m" +# +# ## Lucene query to filter results +# # filter_query = "*" +# +# ## Fields to aggregate values (must be numeric fields) +# # metric_fields = ["metric"] +# +# ## Aggregation function to use on the metric fields +# ## Must be set if 'metric_fields' is set +# ## Valid values are: avg, sum, min, max, sum +# # metric_function = "avg" +# +# ## Fields to be used as tags +# ## Must be text, non-analyzed fields. 
Metric aggregations are performed per tag +# # tags = ["field.keyword", "field2.keyword"] +# +# ## Set to true to not ignore documents when the tag(s) above are missing +# # include_missing_tag = false +# +# ## String value of the tag when the tag does not exist +# ## Used when include_missing_tag is true +# # missing_tag_value = "null" + + +# # Returns ethtool statistics for given interfaces +# [[inputs.ethtool]] +# ## List of interfaces to pull metrics for +# # interface_include = ["eth0"] +# +# ## List of interfaces to ignore when pulling metrics. +# # interface_exclude = ["eth1"] + + +# # Read metrics from one or more commands that can output to stdout +# [[inputs.exec]] +# ## Commands array +# commands = [ +# "/tmp/test.sh", +# "/usr/bin/mycollector --foo=bar", +# "/tmp/collect_*.sh" +# ] +# +# ## Timeout for each command to complete. +# timeout = "5s" +# +# ## measurement name suffix (for separating different commands) +# name_suffix = "_mycollector" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read metrics from fail2ban. +# [[inputs.fail2ban]] +# ## Use sudo to run fail2ban-client +# use_sudo = false + + +# # Read devices value(s) from a Fibaro controller +# [[inputs.fibaro]] +# ## Required Fibaro controller address/hostname. +# ## Note: at the time of writing this plugin, Fibaro only implemented http - no https available +# url = "http://:80" +# +# ## Required credentials to access the API (http://) +# username = "" +# password = "" +# +# ## Amount of time allowed to complete the HTTP request +# # timeout = "5s" + + +# # Parse a complete file each interval +# [[inputs.file]] +# ## Files to parse each interval. Accept standard unix glob matching rules, +# ## as well as ** to match recursive files and directories. 
+# files = ["/tmp/metrics.out"] +# +# ## Name a tag containing the name of the file the data was parsed from. Leave empty +# ## to disable. +# # file_tag = "" +# +# ## Character encoding to use when interpreting the file contents. Invalid +# ## characters are replaced using the unicode replacement character. When set +# ## to the empty string the data is not decoded to text. +# ## ex: character_encoding = "utf-8" +# ## character_encoding = "utf-16le" +# ## character_encoding = "utf-16be" +# ## character_encoding = "" +# # character_encoding = "" +# +# ## The dataformat to be read from files +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Count files in a directory +# [[inputs.filecount]] +# ## Directory to gather stats about. +# ## deprecated in 1.9; use the directories option +# # directory = "/var/cache/apt/archives" +# +# ## Directories to gather stats about. +# ## This accept standard unit glob matching rules, but with the addition of +# ## ** as a "super asterisk". ie: +# ## /var/log/** -> recursively find all directories in /var/log and count files in each directories +# ## /var/log/*/* -> find all directories with a parent dir in /var/log and count files in each directories +# ## /var/log -> count all files in /var/log and all of its subdirectories +# directories = ["/var/cache/apt/archives"] +# +# ## Only count files that match the name pattern. Defaults to "*". +# name = "*.deb" +# +# ## Count files in subdirectories. Defaults to true. +# recursive = false +# +# ## Only count regular files. Defaults to true. +# regular_only = true +# +# ## Follow all symlinks while walking the directory tree. Defaults to false. +# follow_symlinks = false +# +# ## Only count files that are at least this size. 
If size is +# ## a negative number, only count files that are smaller than the +# ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ... +# ## Without quotes and units, interpreted as size in bytes. +# size = "0B" +# +# ## Only count files that have not been touched for at least this +# ## duration. If mtime is negative, only count files that have been +# ## touched in this duration. Defaults to "0s". +# mtime = "0s" + + +# # Read stats about given file(s) +# [[inputs.filestat]] +# ## Files to gather stats about. +# ## These accept standard unix glob matching rules, but with the addition of +# ## ** as a "super asterisk". ie: +# ## "/var/log/**.log" -> recursively find all .log files in /var/log +# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log +# ## "/var/log/apache.log" -> just tail the apache log file +# ## +# ## See https://github.com/gobwas/glob for more examples +# ## +# files = ["/var/log/**.log"] +# +# ## If true, read the entire file and calculate an md5 checksum. +# md5 = false + + +# # Read real time temps from fireboard.io servers +# [[inputs.fireboard]] +# ## Specify auth token for your account +# auth_token = "invalidAuthToken" +# ## You can override the fireboard server URL if necessary +# # url = https://fireboard.io/api/v1/devices.json +# ## You can set a different http_timeout if you need to +# ## You should set a string using an number and time indicator +# ## for example "12s" for 12 seconds. +# # http_timeout = "4s" + + +# # Read metrics exposed by fluentd in_monitor plugin +# [[inputs.fluentd]] +# ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint). +# ## +# ## Endpoint: +# ## - only one URI is allowed +# ## - https is not supported +# endpoint = "http://localhost:24220/api/plugins.json" +# +# ## Define which plugins have to be excluded (based on "type" field - e.g. 
monitor_agent) +# exclude = [ +# "monitor_agent", +# "dummy", +# ] + + +# # Gather repository information from GitHub hosted repositories. +# [[inputs.github]] +# ## List of repositories to monitor. +# repositories = [ +# "influxdata/telegraf", +# "influxdata/influxdb" +# ] +# +# ## Github API access token. Unauthenticated requests are limited to 60 per hour. +# # access_token = "" +# +# ## Github API enterprise url. Github Enterprise accounts must specify their base url. +# # enterprise_base_url = "" +# +# ## Timeout for HTTP requests. +# # http_timeout = "5s" +# +# ## List of additional fields to query. +# ## NOTE: Getting those fields might involve issuing additional API-calls, so please +# ## make sure you do not exceed the rate-limit of GitHub. +# ## +# ## Available fields are: +# ## - pull-requests -- number of open and closed pull requests (2 API-calls per repository) +# # additional_fields = [] + + +# # Read flattened metrics from one or more GrayLog HTTP endpoints +# [[inputs.graylog]] +# ## API endpoint, currently supported API: +# ## +# ## - multiple (Ex http://:12900/system/metrics/multiple) +# ## - namespace (Ex http://:12900/system/metrics/namespace/{namespace}) +# ## +# ## For namespace endpoint, the metrics array will be ignored for that call. +# ## Endpoint can contain namespace and multiple type calls. +# ## +# ## Please check http://[graylog-server-ip]:12900/api-browser for full list +# ## of endpoints +# servers = [ +# "http://[graylog-server-ip]:12900/system/metrics/multiple", +# ] +# +# ## Metrics list +# ## List of metrics can be found on Graylog webservice documentation. 
+# ## Or by hitting the web service api at:
+# ## http://[graylog-host]:12900/system/metrics
+# metrics = [
+# "jvm.cl.loaded",
+# "jvm.memory.pools.Metaspace.committed"
+# ]
+#
+# ## Username and password
+# username = ""
+# password = ""
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics of haproxy, via socket or csv stats page
+# [[inputs.haproxy]]
+# ## An array of address to gather stats about. Specify an ip or hostname
+# ## with optional port. ie localhost, 10.10.3.33:1936, etc.
+# ## Make sure you specify the complete path to the stats endpoint
+# ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats
+#
+# ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats
+# servers = ["http://myhaproxy.com:1936/haproxy?stats"]
+#
+# ## Credentials for basic HTTP authentication
+# # username = "admin"
+# # password = "admin"
+#
+# ## You can also use local socket with standard wildcard globbing.
+# ## Server address not starting with 'http' will be treated as a possible
+# ## socket, so both examples below are valid.
+# # servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]
+#
+# ## By default, some of the fields are renamed from what haproxy calls them.
+# ## Setting this option to true results in the plugin keeping the original
+# ## field names.
+# # keep_field_names = false
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Monitor disks' temperatures using hddtemp
+# [[inputs.hddtemp]]
+# ## By default, telegraf gathers temps data from all disks detected by the
+# ## hddtemp.
+# ##
+# ## Only collect temps from the selected disks.
+# ## +# ## A * as the device name will return the temperature values of all disks. +# ## +# # address = "127.0.0.1:7634" +# # devices = ["sda", "*"] + + +# # Read formatted metrics from one or more HTTP endpoints +# [[inputs.http]] +# ## One or more URLs from which to read formatted metrics +# urls = [ +# "http://localhost/metrics" +# ] +# +# ## HTTP method +# # method = "GET" +# +# ## Optional HTTP headers +# # headers = {"X-Special-Header" = "Special-Value"} +# +# ## Optional file with Bearer token +# ## file content is added as an Authorization header +# # bearer_token = "/path/to/file" +# +# ## Optional HTTP Basic Auth Credentials +# # username = "username" +# # password = "pa$$word" +# +# ## HTTP entity-body to send with POST/PUT requests. +# # body = "" +# +# ## HTTP Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "identity" +# +# ## HTTP Proxy support +# # http_proxy_url = "" +# +# ## OAuth2 Client Credentials Grant +# # client_id = "clientid" +# # client_secret = "secret" +# # token_url = "https://indentityprovider/oauth2/v1/token" +# # scopes = ["urn:opc:idm:__myscopes__"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional Cookie authentication +# # cookie_auth_url = "https://localhost/authMe" +# # cookie_auth_method = "POST" +# # cookie_auth_username = "username" +# # cookie_auth_password = "pa$$word" +# # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' +# ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie +# # cookie_auth_renewal = "5m" +# +# ## Amount of time allowed to complete the HTTP request +# # timeout = "5s" +# +# ## List of success status codes +# # success_status_codes = [200] +# 
+# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# # data_format = "influx" + + +# # HTTP/HTTPS request given an address a method and a timeout +# [[inputs.http_response]] +# ## Deprecated in 1.12, use 'urls' +# ## Server address (default http://localhost) +# # address = "http://localhost" +# +# ## List of urls to query. +# # urls = ["http://localhost"] +# +# ## Set http_proxy (telegraf uses the system wide proxy settings if it's is not set) +# # http_proxy = "http://localhost:8888" +# +# ## Set response_timeout (default 5 seconds) +# # response_timeout = "5s" +# +# ## HTTP Request Method +# # method = "GET" +# +# ## Whether to follow redirects from the server (defaults to false) +# # follow_redirects = false +# +# ## Optional file with Bearer token +# ## file content is added as an Authorization header +# # bearer_token = "/path/to/file" +# +# ## Optional HTTP Basic Auth Credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional HTTP Request Body +# # body = ''' +# # {'fake':'data'} +# # ''' +# +# ## Optional name of the field that will contain the body of the response. +# ## By default it is set to an empty String indicating that the body's content won't be added +# # response_body_field = '' +# +# ## Maximum allowed HTTP response body size in bytes. +# ## 0 means to use the default of 32MiB. +# ## If the response body size exceeds this limit a "body_read_error" will be raised +# # response_body_max_size = "32MiB" +# +# ## Optional substring or regex match in body of the response (case sensitive) +# # response_string_match = "\"service_status\": \"up\"" +# # response_string_match = "ok" +# # response_string_match = "\".*_status\".?:.?\"up\"" +# +# ## Expected response status code. +# ## The status code of the response is compared to this value. 
If they match, the field +# ## "response_status_code_match" will be 1, otherwise it will be 0. If the +# ## expected status code is 0, the check is disabled and the field won't be added. +# # response_status_code = 0 +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## HTTP Request Headers (all values must be strings) +# # [inputs.http_response.headers] +# # Host = "github.com" +# +# ## Optional setting to map response http headers into tags +# ## If the http header is not present on the request, no corresponding tag will be added +# ## If multiple instances of the http header are present, only the first value will be used +# # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} +# +# ## Interface to use when dialing an address +# # interface = "eth0" + + +# # Read flattened metrics from one or more JSON HTTP endpoints +# [[inputs.httpjson]] +# ## NOTE This plugin only reads numerical measurements, strings and booleans +# ## will be ignored. +# +# ## Name for the service being polled. Will be appended to the name of the +# ## measurement e.g. httpjson_webserver_stats +# ## +# ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead. 
+# name = "webserver_stats" +# +# ## URL of each server in the service's cluster +# servers = [ +# "http://localhost:9999/stats/", +# "http://localhost:9998/stats/", +# ] +# ## Set response_timeout (default 5 seconds) +# response_timeout = "5s" +# +# ## HTTP method to use: GET or POST (case-sensitive) +# method = "GET" +# +# ## List of tag names to extract from top-level of JSON server response +# # tag_keys = [ +# # "my_tag_1", +# # "my_tag_2" +# # ] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## HTTP parameters (all values must be strings). For "GET" requests, data +# ## will be included in the query. For "POST" requests, data will be included +# ## in the request body as "x-www-form-urlencoded". +# # [inputs.httpjson.parameters] +# # event_type = "cpu_spike" +# # threshold = "0.75" +# +# ## HTTP Headers (all values must be strings) +# # [inputs.httpjson.headers] +# # X-Auth-Token = "my-xauth-token" +# # apiVersion = "v1" + + +# # Gather Icinga2 status +# [[inputs.icinga2]] +# ## Required Icinga2 server address +# # server = "https://localhost:5665" +# +# ## Required Icinga2 object type ("services" or "hosts") +# # object_type = "services" +# +# ## Credentials for basic HTTP authentication +# # username = "admin" +# # password = "admin" +# +# ## Maximum time to receive response. 
+# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = true + + +# # Gets counters from all InfiniBand cards and ports installed +# [[inputs.infiniband]] +# # no configuration + + +# # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints +# [[inputs.influxdb]] +# ## Works with InfluxDB debug endpoints out of the box, +# ## but other services can use this format too. +# ## See the influxdb plugin's README for more details. +# +# ## Multiple URLs from which to read InfluxDB-formatted JSON +# ## Default is "http://localhost:8086/debug/vars". +# urls = [ +# "http://localhost:8086/debug/vars" +# ] +# +# ## Username and password to send using HTTP Basic Authentication. +# # username = "" +# # password = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## http request & header timeout +# timeout = "5s" + + +# # Collect statistics about itself +# [[inputs.internal]] +# ## If true, collect telegraf memory stats. +# # collect_memstats = true + + +# # Monitors internet speed using speedtest.net service +# [[inputs.internet_speed]] +# ## Sets if runs file download test +# ## Default: false +# enable_file_download = false + + +# # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs. +# [[inputs.interrupts]] +# ## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is +# ## stored as a field. +# ## +# ## The default is false for backwards compatibility, and will be changed to +# ## true in a future version. It is recommended to set to true on new +# ## deployments. 
+# # cpu_as_tag = false +# +# ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e. +# # [inputs.interrupts.tagdrop] +# # irq = [ "NET_RX", "TASKLET" ] + + +# # Read metrics from the bare metal servers via IPMI +# [[inputs.ipmi_sensor]] +# ## optionally specify the path to the ipmitool executable +# # path = "/usr/bin/ipmitool" +# ## +# ## Setting 'use_sudo' to true will make use of sudo to run ipmitool. +# ## Sudo must be configured to allow the telegraf user to run ipmitool +# ## without a password. +# # use_sudo = false +# ## +# ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR +# # privilege = "ADMINISTRATOR" +# ## +# ## optionally specify one or more servers via a url matching +# ## [username[:password]@][protocol[(address)]] +# ## e.g. +# ## root:passwd@lan(127.0.0.1) +# ## +# ## if no servers are specified, local machine sensor stats will be queried +# ## +# # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"] +# +# ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid +# ## gaps or overlap in pulled data +# interval = "30s" +# +# ## Timeout for the ipmitool command to complete +# timeout = "20s" +# +# ## Schema Version: (Optional, defaults to version 1) +# metric_version = 2 +# +# ## Optionally provide the hex key for the IMPI connection. +# # hex_key = "" +# +# ## If ipmitool should use a cache +# ## for me ipmitool runs about 2 to 10 times faster with cache enabled on HP G10 servers (when using ubuntu20.04) +# ## the cache file may not work well for you if some sensors come up late +# # use_cache = false +# +# ## Path to the ipmitools cache file (defaults to OS temp dir) +# ## The provided path must exist and must be writable +# # cache_path = "" + + +# # Gather packets and bytes counters from Linux ipsets +# [[inputs.ipset]] +# ## By default, we only show sets which have already matched at least 1 packet. +# ## set include_unmatched_sets = true to gather them all. 
+# include_unmatched_sets = false
+# ## Adjust your sudo settings appropriately if using this option ("sudo ipset save")
+# use_sudo = false
+# ## The default timeout of 1s for ipset execution can be overridden here:
+# # timeout = "1s"
+
+
+# # Read jobs and cluster metrics from Jenkins instances
+# [[inputs.jenkins]]
+# ## The Jenkins URL in the format "scheme://host:port"
+# url = "http://my-jenkins-instance:8080"
+# # username = "admin"
+# # password = "admin"
+#
+# ## Set response_timeout
+# response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Optional Max Job Build Age filter
+# ## Default 1 hour, ignore builds older than max_build_age
+# # max_build_age = "1h"
+#
+# ## Optional Sub Job Depth filter
+# ## Jenkins can have unlimited layers of sub jobs
+# ## This config will limit the layers of pulling, default value 0 means
+# ## unlimited pulling until no more sub jobs
+# # max_subjob_depth = 0
+#
+# ## Optional Sub Job Per Layer
+# ## In workflow-multibranch-plugin, each branch will be created as a sub job.
+# ## This config will limit polling to only the latest branches in each layer,
+# ## empty will use default value 10
+# # max_subjob_per_layer = 10
+#
+# ## Jobs to include or exclude from gathering
+# ## When using both lists, job_exclude has priority.
+# ## Wildcards are supported: [ "jobA/*", "jobB/subjob1/*"]
+# # job_include = [ "*" ]
+# # job_exclude = [ ]
+#
+# ## Nodes to include or exclude from gathering
+# ## When using both lists, node_exclude has priority.
+# # node_include = [ "*" ] +# # node_exclude = [ ] +# +# ## Worker pool for jenkins plugin only +# ## Empty this field will use default value 5 +# # max_connections = 5 + + +# # Read JMX metrics through Jolokia +# [[inputs.jolokia]] +# # DEPRECATED: the jolokia plugin has been deprecated in favor of the +# # jolokia2 plugin +# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2 +# +# ## This is the context root used to compose the jolokia url +# ## NOTE that Jolokia requires a trailing slash at the end of the context root +# ## NOTE that your jolokia security policy must allow for POST requests. +# context = "/jolokia/" +# +# ## This specifies the mode used +# # mode = "proxy" +# # +# ## When in proxy mode this section is used to specify further +# ## proxy address configurations. +# ## Remember to change host address to fit your environment. +# # [inputs.jolokia.proxy] +# # host = "127.0.0.1" +# # port = "8080" +# +# ## Optional http timeouts +# ## +# ## response_header_timeout, if non-zero, specifies the amount of time to wait +# ## for a server's response headers after fully writing the request. +# # response_header_timeout = "3s" +# ## +# ## client_timeout specifies a time limit for requests made by this client. +# ## Includes connection time, any redirects, and reading the response body. +# # client_timeout = "4s" +# +# ## Attribute delimiter +# ## +# ## When multiple attributes are returned for a single +# ## [inputs.jolokia.metrics], the field name is a concatenation of the metric +# ## name, and the attribute name, separated by the given delimiter. +# # delimiter = "_" +# +# ## List of servers exposing jolokia read service +# [[inputs.jolokia.servers]] +# name = "as-server-01" +# host = "127.0.0.1" +# port = "8080" +# # username = "myuser" +# # password = "mypassword" +# +# ## List of metrics collected on above servers +# ## Each metric consists in a name, a jmx path and either +# ## a pass or drop slice attribute. 
+# ## This collects all heap memory usage metrics.
+# [[inputs.jolokia.metrics]]
+# name = "heap_memory_usage"
+# mbean = "java.lang:type=Memory"
+# attribute = "HeapMemoryUsage"
+#
+# ## This collects thread count metrics.
+# [[inputs.jolokia.metrics]]
+# name = "thread_count"
+# mbean = "java.lang:type=Threading"
+# attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount"
+#
+# ## This collects class loaded/unloaded count metrics.
+# [[inputs.jolokia.metrics]]
+# name = "class_count"
+# mbean = "java.lang:type=ClassLoading"
+# attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"
+
+
+# # Read JMX metrics from a Jolokia REST agent endpoint
+# [[inputs.jolokia2_agent]]
+# # default_tag_prefix = ""
+# # default_field_prefix = ""
+# # default_field_separator = "."
+#
+# # Add agent URLs to query
+# urls = ["http://localhost:8080/jolokia"]
+# # username = ""
+# # password = ""
+# # response_timeout = "5s"
+#
+# ## Optional TLS config
+# # tls_ca = "/var/private/ca.pem"
+# # tls_cert = "/var/private/client.pem"
+# # tls_key = "/var/private/client-key.pem"
+# # insecure_skip_verify = false
+#
+# ## Add metrics to read
+# [[inputs.jolokia2_agent.metric]]
+# name = "java_runtime"
+# mbean = "java.lang:type=Runtime"
+# paths = ["Uptime"]
+
+
+# # Read JMX metrics from a Jolokia REST proxy endpoint
+# [[inputs.jolokia2_proxy]]
+# # default_tag_prefix = ""
+# # default_field_prefix = ""
+# # default_field_separator = "."
+# +# ## Proxy agent +# url = "http://localhost:8080/jolokia" +# # username = "" +# # password = "" +# # response_timeout = "5s" +# +# ## Optional TLS config +# # tls_ca = "/var/private/ca.pem" +# # tls_cert = "/var/private/client.pem" +# # tls_key = "/var/private/client-key.pem" +# # insecure_skip_verify = false +# +# ## Add proxy targets to query +# # default_target_username = "" +# # default_target_password = "" +# [[inputs.jolokia2_proxy.target]] +# url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi" +# # username = "" +# # password = "" +# +# ## Add metrics to read +# [[inputs.jolokia2_proxy.metric]] +# name = "java_runtime" +# mbean = "java.lang:type=Runtime" +# paths = ["Uptime"] + + +# # Read Kapacitor-formatted JSON metrics from one or more HTTP endpoints +# [[inputs.kapacitor]] +# ## Multiple URLs from which to read Kapacitor-formatted JSON +# ## Default is "http://localhost:9092/kapacitor/v1/debug/vars". +# urls = [ +# "http://localhost:9092/kapacitor/v1/debug/vars" +# ] +# +# ## Time limit for http requests +# timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read status information from one or more Kibana servers +# [[inputs.kibana]] +# ## Specify a list of one or more Kibana servers +# servers = ["http://localhost:5601"] +# +# ## Timeout for HTTP requests +# timeout = "5s" +# +# ## HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from the Kubernetes api +# [[inputs.kube_inventory]] +# ## URL for the Kubernetes API +# url = "https://127.0.0.1" +# +# ## Namespace to 
use. Set to "" to use all namespaces. +# # namespace = "default" +# +# ## Use bearer token for authorization. ('bearer_token' takes priority) +# ## If both of these are empty, we'll use the default serviceaccount: +# ## at: /run/secrets/kubernetes.io/serviceaccount/token +# # bearer_token = "/path/to/bearer/token" +# ## OR +# # bearer_token_string = "abc_123" +# +# ## Set response_timeout (default 5 seconds) +# # response_timeout = "5s" +# +# ## Optional Resources to exclude from gathering +# ## Leave them with blank with try to gather everything available. +# ## Values can be - "daemonsets", deployments", "endpoints", "ingress", "nodes", +# ## "persistentvolumes", "persistentvolumeclaims", "pods", "services", "statefulsets" +# # resource_exclude = [ "deployments", "nodes", "statefulsets" ] +# +# ## Optional Resources to include when gathering +# ## Overrides resource_exclude if both set. +# # resource_include = [ "deployments", "nodes", "statefulsets" ] +# +# ## selectors to include and exclude as tags. Globs accepted. +# ## Note that an empty array for both will include all selectors as tags +# ## selector_exclude overrides selector_include if both set. +# # selector_include = [] +# # selector_exclude = ["*"] +# +# ## Optional TLS Config +# # tls_ca = "/path/to/cafile" +# # tls_cert = "/path/to/certfile" +# # tls_key = "/path/to/keyfile" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from the kubernetes kubelet api +# [[inputs.kubernetes]] +# ## URL for the kubelet +# url = "http://127.0.0.1:10255" +# +# ## Use bearer token for authorization. ('bearer_token' takes priority) +# ## If both of these are empty, we'll use the default serviceaccount: +# ## at: /run/secrets/kubernetes.io/serviceaccount/token +# # bearer_token = "/path/to/bearer/token" +# ## OR +# # bearer_token_string = "abc_123" +# +# ## Pod labels to be added as tags. An empty array for both include and +# ## exclude will include all labels. 
+# # label_include = [] +# # label_exclude = ["*"] +# +# ## Set response_timeout (default 5 seconds) +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = /path/to/cafile +# # tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from a LeoFS Server via SNMP +# [[inputs.leofs]] +# ## An array of URLs of the form: +# ## host [ ":" port] +# servers = ["127.0.0.1:4020"] + + +# # Provides Linux sysctl fs metrics +# [[inputs.linux_sysctl_fs]] +# # no configuration + + +# # Read metrics exposed by Logstash +# [[inputs.logstash]] +# ## The URL of the exposed Logstash API endpoint. +# url = "http://127.0.0.1:9600" +# +# ## Use Logstash 5 single pipeline API, set to true when monitoring +# ## Logstash 5. +# # single_pipeline = false +# +# ## Enable optional collection components. Can contain +# ## "pipelines", "process", and "jvm". +# # collect = ["pipelines", "process", "jvm"] +# +# ## Timeout for HTTP requests. +# # timeout = "5s" +# +# ## Optional HTTP Basic Auth credentials. +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config. +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Use TLS but skip chain & host verification. +# # insecure_skip_verify = false +# +# ## Optional HTTP headers. +# # [inputs.logstash.headers] +# # "X-Special-Header" = "Special-Value" + + +# # Read metrics about LVM physical volumes, volume groups, logical volumes. +# [[inputs.lvm]] +# ## Use sudo to run LVM commands +# use_sudo = false + + +# # Gathers metrics from the /3.0/reports MailChimp API +# [[inputs.mailchimp]] +# ## MailChimp API key +# ## get from https://admin.mailchimp.com/account/api/ +# api_key = "" # required +# ## Reports for campaigns sent more than days_old ago will not be collected. +# ## 0 means collect all. 
+# days_old = 0 +# ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old +# # campaign_id = "" + + +# # Retrieves information on a specific host in a MarkLogic Cluster +# [[inputs.marklogic]] +# ## Base URL of the MarkLogic HTTP Server. +# url = "http://localhost:8002" +# +# ## List of specific hostnames to retrieve information. At least (1) required. +# # hosts = ["hostname1", "hostname2"] +# +# ## Using HTTP Basic Authentication. Management API requires 'manage-user' role privileges +# # username = "myuser" +# # password = "mypassword" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from one or many mcrouter servers +# [[inputs.mcrouter]] +# ## An array of address to gather stats about. Specify an ip or hostname +# ## with port. ie tcp://localhost:11211, tcp://10.0.0.1:11211, etc. +# servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"] +# +# ## Timeout for metric collections from all servers. Minimum timeout is "1s". +# # timeout = "5s" + + +# # Read metrics from one or many memcached servers +# [[inputs.memcached]] +# ## An array of address to gather stats about. Specify an ip on hostname +# ## with optional port. ie localhost, 10.0.0.1:11211, etc. +# servers = ["localhost:11211"] +# # unix_sockets = ["/var/run/memcached.sock"] + + +# # Telegraf plugin for gathering metrics from N Mesos masters +# [[inputs.mesos]] +# ## Timeout, in ms. +# timeout = 100 +# +# ## A list of Mesos masters. +# masters = ["http://localhost:5050"] +# +# ## Master metrics groups to be collected, by default, all enabled. 
+# master_collections = [ +# "resources", +# "master", +# "system", +# "agents", +# "frameworks", +# "framework_offers", +# "tasks", +# "messages", +# "evqueue", +# "registrar", +# "allocator", +# ] +# +# ## A list of Mesos slaves, default is [] +# # slaves = [] +# +# ## Slave metrics groups to be collected, by default, all enabled. +# # slave_collections = [ +# # "resources", +# # "agent", +# # "system", +# # "executors", +# # "tasks", +# # "messages", +# # ] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Collects scores from a Minecraft server's scoreboard using the RCON protocol +# [[inputs.minecraft]] +# ## Address of the Minecraft server. +# # server = "localhost" +# +# ## Server RCON Port. +# # port = "25575" +# +# ## Server RCON Password. +# password = "" +# +# ## Uncomment to remove deprecated metric components. +# # tagdrop = ["server"] + + +# # Retrieve data from MODBUS slave devices +# [[inputs.modbus]] +# ## Connection Configuration +# ## +# ## The plugin supports connections to PLCs via MODBUS/TCP, RTU over TCP, ASCII over TCP or +# ## via serial line communication in binary (RTU) or readable (ASCII) encoding +# ## +# ## Device name +# name = "Device" +# +# ## Slave ID - addresses a MODBUS device on the bus +# ## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved] +# slave_id = 1 +# +# ## Timeout for each request +# timeout = "1s" +# +# ## Maximum number of retries and the time to wait between retries +# ## when a slave-device is busy. 
+# # busy_retries = 0 +# # busy_retries_wait = "100ms" +# +# # TCP - connect via Modbus/TCP +# controller = "tcp://localhost:502" +# +# ## Serial (RS485; RS232) +# # controller = "file:///dev/ttyUSB0" +# # baud_rate = 9600 +# # data_bits = 8 +# # parity = "N" +# # stop_bits = 1 +# +# ## For Modbus over TCP you can choose between "TCP", "RTUoverTCP" and "ASCIIoverTCP" +# ## default behaviour is "TCP" if the controller is TCP +# ## For Serial you can choose between "RTU" and "ASCII" +# # transmission_mode = "RTU" +# +# ## Measurements +# ## +# +# ## Digital Variables, Discrete Inputs and Coils +# ## measurement - the (optional) measurement name, defaults to "modbus" +# ## name - the variable name +# ## address - variable address +# +# discrete_inputs = [ +# { name = "start", address = [0]}, +# { name = "stop", address = [1]}, +# { name = "reset", address = [2]}, +# { name = "emergency_stop", address = [3]}, +# ] +# coils = [ +# { name = "motor1_run", address = [0]}, +# { name = "motor1_jog", address = [1]}, +# { name = "motor1_stop", address = [2]}, +# ] +# +# ## Analog Variables, Input Registers and Holding Registers +# ## measurement - the (optional) measurement name, defaults to "modbus" +# ## name - the variable name +# ## byte_order - the ordering of bytes +# ## |---AB, ABCD - Big Endian +# ## |---BA, DCBA - Little Endian +# ## |---BADC - Mid-Big Endian +# ## |---CDAB - Mid-Little Endian +# ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, +# ## FLOAT32-IEEE, FLOAT64-IEEE (the IEEE 754 binary representation) +# ## FLOAT32, FIXED, UFIXED (fixed-point representation on input) +# ## scale - the final numeric variable representation +# ## address - variable address +# +# holding_registers = [ +# { name = "power_factor", byte_order = "AB", data_type = "FIXED", scale=0.01, address = [8]}, +# { name = "voltage", byte_order = "AB", data_type = "FIXED", scale=0.1, address = [0]}, +# { name = "energy", byte_order = "ABCD", data_type = "FIXED", scale=0.001, 
address = [5,6]}, +# { name = "current", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [1,2]}, +# { name = "frequency", byte_order = "AB", data_type = "UFIXED", scale=0.1, address = [7]}, +# { name = "power", byte_order = "ABCD", data_type = "UFIXED", scale=0.1, address = [3,4]}, +# ] +# input_registers = [ +# { name = "tank_level", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]}, +# { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]}, +# { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]}, +# ] + + +# # Read metrics from one or many MongoDB servers +# [[inputs.mongodb]] +# ## An array of URLs of the form: +# ## "mongodb://" [user ":" pass "@"] host [ ":" port] +# ## For example: +# ## mongodb://user:auth_key@10.10.3.30:27017, +# ## mongodb://10.10.3.33:18832, +# servers = ["mongodb://127.0.0.1:27017"] +# +# ## When true, collect cluster status +# ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which +# ## may have an impact on performance. +# # gather_cluster_status = true +# +# ## When true, collect per database stats +# # gather_perdb_stats = false +# +# ## When true, collect per collection stats +# # gather_col_stats = false +# +# ## When true, collect usage statistics for each collection +# ## (insert, update, queries, remove, getmore, commands etc...). 
+# # gather_top_stat = false
+#
+# ## List of db where collections stats are collected
+# ## If empty, all dbs are included
+# # col_stats_dbs = ["local"]
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics and status information about processes managed by Monit
+# [[inputs.monit]]
+# ## Monit HTTPD address
+# address = "http://127.0.0.1:2812"
+#
+# ## Username and Password for Monit
+# # username = ""
+# # password = ""
+#
+# ## Amount of time allowed to complete the HTTP request
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Aggregates the contents of multiple files into a single point
+# [[inputs.multifile]]
+# ## Base directory where telegraf will look for files.
+# ## Omit this option to use absolute paths.
+# base_dir = "/sys/bus/i2c/devices/1-0076/iio:device0"
+#
+# ## If true, Telegraf discards all data when a single file can't be read.
+# ## Else, Telegraf omits the field generated from this file.
+# # fail_early = true
+#
+# ## Files to parse each interval.
+# [[inputs.multifile.file]] +# file = "in_pressure_input" +# dest = "pressure" +# conversion = "float" +# [[inputs.multifile.file]] +# file = "in_temp_input" +# dest = "temperature" +# conversion = "float(3)" +# [[inputs.multifile.file]] +# file = "in_humidityrelative_input" +# dest = "humidityrelative" +# conversion = "float(3)" + + +# # Read metrics from one or many mysql servers +# [[inputs.mysql]] +# ## specify servers via a url matching: +# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]] +# ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name +# ## e.g. +# ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"] +# ## servers = ["user@tcp(127.0.0.1:3306)/?tls=false"] +# # +# ## If no servers are specified, then localhost is used as the host. +# servers = ["tcp(127.0.0.1:3306)/"] +# +# ## Selects the metric output format. +# ## +# ## This option exists to maintain backwards compatibility, if you have +# ## existing metrics do not set or change this value until you are ready to +# ## migrate to the new format. +# ## +# ## If you do not have existing metrics from this plugin set to the latest +# ## version. 
+# ##
+# ## Telegraf >=1.6: metric_version = 2
+# ## <1.6: metric_version = 1 (or unset)
+# metric_version = 2
+#
+# ## if the list is empty, then metrics are gathered from all database tables
+# # table_schema_databases = []
+#
+# ## gather metrics from INFORMATION_SCHEMA.TABLES for the databases provided in the above list
+# # gather_table_schema = false
+#
+# ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
+# # gather_process_list = false
+#
+# ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS
+# # gather_user_statistics = false
+#
+# ## gather auto_increment columns and max values from information schema
+# # gather_info_schema_auto_inc = false
+#
+# ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS
+# # gather_innodb_metrics = false
+#
+# ## gather metrics from SHOW SLAVE STATUS command output
+# # gather_slave_status = false
+#
+# ## gather metrics from all channels from SHOW SLAVE STATUS command output
+# # gather_all_slave_channels = false
+#
+# ## use MariaDB dialect for all channels SHOW SLAVE STATUS
+# # mariadb_dialect = false
+#
+# ## gather metrics from SHOW BINARY LOGS command output
+# # gather_binary_logs = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.GLOBAL_VARIABLES
+# # gather_global_variables = true
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
+# # gather_table_io_waits = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
+# # gather_table_lock_waits = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
+# # gather_index_io_waits = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
+# # gather_event_waits = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME
+# # gather_file_events_stats = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
+# # gather_perf_events_statements = false
+#
+# ## the limits for metrics from
perf_events_statements +# # perf_events_statements_digest_text_limit = 120 +# # perf_events_statements_limit = 250 +# # perf_events_statements_time_limit = 86400 +# +# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME +# # gather_perf_sum_per_acc_per_event = false +# +# ## list of events to be gathered for gather_perf_sum_per_acc_per_event +# ## in case of empty list all events will be gathered +# # perf_summary_events = [] +# +# ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES) +# ## example: interval_slow = "30m" +# # interval_slow = "" +# +# ## Optional TLS Config (will be used if tls=custom parameter specified in server uri) +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Provides metrics about the state of a NATS server +# [[inputs.nats]] +# ## The address of the monitoring endpoint of the NATS server +# server = "http://localhost:8222" +# +# ## Maximum time to receive response +# # response_timeout = "5s" + + +# # Neptune Apex data collector +# [[inputs.neptune_apex]] +# ## The Neptune Apex plugin reads the publicly available status.xml data from a local Apex. +# ## Measurements will be logged under "apex". +# +# ## The base URL of the local Apex(es). If you specify more than one server, they will +# ## be differentiated by the "source" tag. +# servers = [ +# "http://apex.local", +# ] +# +# ## The response_timeout specifies how long to wait for a reply from the Apex. +# #response_timeout = "5s" + + +# # Read metrics about network interface usage +# [[inputs.net]] +# ## By default, telegraf gathers stats from any up interface (excluding loopback) +# ## Setting interfaces will tell it to gather these explicit interfaces, +# ## regardless of status. 
+# ##
+# # interfaces = ["eth0"]
+# ##
+# ## On Linux systems telegraf also collects protocol stats.
+# ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics.
+# ##
+# # ignore_protocol_stats = false
+# ##
+
+
+# # Collect response time of a TCP or UDP connection
+# [[inputs.net_response]]
+# ## Protocol, must be "tcp" or "udp"
+# ## NOTE: because the "udp" protocol does not respond to requests, it requires
+# ## a send/expect string pair (see below).
+# protocol = "tcp"
+# ## Server address (default localhost)
+# address = "localhost:80"
+#
+# ## Set timeout
+# # timeout = "1s"
+#
+# ## Set read timeout (only used if expecting a response)
+# # read_timeout = "1s"
+#
+# ## The following options are required for UDP checks. For TCP, they are
+# ## optional. The plugin will send the given string to the server and then
+# ## expect to receive the given 'expect' string back.
+# ## string sent to the server
+# # send = "ssh"
+# ## expected string in answer
+# # expect = "ssh"
+#
+# ## Uncomment to remove deprecated fields
+# # fielddrop = ["result_type", "string_found"]
+
+
+# # Read TCP metrics such as established, time wait and sockets counts.
+# [[inputs.netstat]]
+# # no configuration
+
+
+# # Read per-mount NFS client metrics from /proc/self/mountstats
+# [[inputs.nfsclient]]
+# ## Read more low-level metrics (optional, defaults to false)
+# # fullstat = false
+#
+# ## List of mounts to explicitly include or exclude (optional)
+# ## The pattern (Go regexp) is matched against the mount point (not the
+# ## device being mounted). If include_mounts is set, all mounts are ignored
+# ## unless present in the list. If a mount is listed in both include_mounts
+# ## and exclude_mounts, it is excluded. Go regexp patterns can be used.
+# # include_mounts = []
+# # exclude_mounts = []
+#
+# ## List of operations to include or exclude from collecting. This applies
+# ## only when fullstat=true.
Semantics are similar to {include,exclude}_mounts:
+# ## the default is to collect everything; when include_operations is set, only
+# ## those OPs are collected; when exclude_operations is set, all are collected
+# ## except those listed. If include and exclude are set, the OP is excluded.
+# ## See /proc/self/mountstats for a list of valid operations; note that
+# ## NFSv3 and NFSv4 have different lists. While it is not possible to
+# ## have different include/exclude lists for NFSv3/4, unused elements
+# ## in the list should be okay. It is possible to have different lists
+# ## for different mountpoints: use multiple [[inputs.nfsclient]] stanzas,
+# ## with their own lists. See "include_mounts" above, and be careful of
+# ## duplicate metrics.
+# # include_operations = []
+# # exclude_operations = []
+
+
+# # Read Nginx's basic status information (ngx_http_stub_status_module)
+# [[inputs.nginx]]
+# # An array of Nginx stub_status URI to gather stats.
+# urls = ["http://localhost/server_status"]
+#
+# ## Optional TLS Config
+# tls_ca = "/etc/telegraf/ca.pem"
+# tls_cert = "/etc/telegraf/cert.cer"
+# tls_key = "/etc/telegraf/key.key"
+# ## Use TLS but skip chain & host verification
+# insecure_skip_verify = false
+#
+# # HTTP response timeout (default: 5s)
+# response_timeout = "5s"
+
+
+# # Read Nginx Plus' full status information (ngx_http_status_module)
+# [[inputs.nginx_plus]]
+# ## An array of ngx_http_status_module or status URI to gather stats.
+# urls = ["http://localhost/status"]
+#
+# # HTTP response timeout (default: 5s)
+# response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read Nginx Plus Api documentation
+# [[inputs.nginx_plus_api]]
+# ## An array of API URI to gather stats.
+# urls = ["http://localhost/api"] +# +# # Nginx API version, default: 3 +# # api_version = 3 +# +# # HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read Nginx virtual host traffic status module information (nginx-module-sts) +# [[inputs.nginx_sts]] +# ## An array of ngx_http_status_module or status URI to gather stats. +# urls = ["http://localhost/status"] +# +# ## HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module) +# [[inputs.nginx_upstream_check]] +# ## An URL where Nginx Upstream check module is enabled +# ## It should be set to return a JSON formatted response +# url = "http://127.0.0.1/status?format=json" +# +# ## HTTP method +# # method = "GET" +# +# ## Optional HTTP headers +# # headers = {"X-Special-Header" = "Special-Value"} +# +# ## Override HTTP "Host" header +# # host_header = "check.example.com" +# +# ## Timeout for HTTP requests +# timeout = "5s" +# +# ## Optional HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read Nginx virtual host traffic status module information (nginx-module-vts) +# [[inputs.nginx_vts]] +# ## An array of ngx_http_status_module or status URI to gather stats. 
+# urls = ["http://localhost/status"] +# +# ## HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # A plugin to collect stats from the NSD authoritative DNS name server +# [[inputs.nsd]] +# ## Address of server to connect to, optionally ':port'. Defaults to the +# ## address in the nsd config file. +# server = "127.0.0.1:8953" +# +# ## If running as a restricted user you can prepend sudo for additional access: +# # use_sudo = false +# +# ## The default location of the nsd-control binary can be overridden with: +# # binary = "/usr/sbin/nsd-control" +# +# ## The default location of the nsd config file can be overridden with: +# # config_file = "/etc/nsd/nsd.conf" +# +# ## The default timeout of 1s can be overridden with: +# # timeout = "1s" + + +# # Read NSQ topic and channel statistics. +# [[inputs.nsq]] +# ## An array of NSQD HTTP API endpoints +# endpoints = ["http://localhost:4151"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Collect kernel snmp counters and network interface statistics +# [[inputs.nstat]] +# ## file paths for proc files. If empty default paths will be used: +# ## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6 +# ## These can also be overridden with env variables, see README. +# proc_net_netstat = "/proc/net/netstat" +# proc_net_snmp = "/proc/net/snmp" +# proc_net_snmp6 = "/proc/net/snmp6" +# ## dump metrics with 0 values too +# dump_zeros = true + + +# # Get standard NTP query metrics, requires ntpq executable. +# [[inputs.ntpq]] +# ## If false, set the -n ntpq flag. Can reduce metric gather time. 
+# dns_lookup = true
+
+
+# # Pulls statistics from nvidia GPUs attached to the host
+# [[inputs.nvidia_smi]]
+# ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath
+# # bin_path = "/usr/bin/nvidia-smi"
+#
+# ## Optional: timeout for GPU polling
+# # timeout = "5s"
+
+
+# # Retrieve data from OPCUA devices
+# [[inputs.opcua]]
+# ## Metric name
+# # name = "opcua"
+# #
+# ## OPC UA Endpoint URL
+# # endpoint = "opc.tcp://localhost:4840"
+# #
+# ## Maximum time allowed to establish a connection to the endpoint.
+# # connect_timeout = "10s"
+# #
+# ## Maximum time allowed for a request over the established connection.
+# # request_timeout = "5s"
+# #
+# ## Security policy, one of "None", "Basic128Rsa15", "Basic256",
+# ## "Basic256Sha256", or "auto"
+# # security_policy = "auto"
+# #
+# ## Security mode, one of "None", "Sign", "SignAndEncrypt", or "auto"
+# # security_mode = "auto"
+# #
+# ## Path to cert.pem. Required when security mode or policy isn't "None".
+# ## If cert path is not supplied, self-signed cert and key will be generated.
+# # certificate = "/etc/telegraf/cert.pem"
+# #
+# ## Path to private key.pem. Required when security mode or policy isn't "None".
+# ## If key path is not supplied, self-signed cert and key will be generated.
+# # private_key = "/etc/telegraf/key.pem"
+# #
+# ## Authentication Method, one of "Certificate", "UserName", or "Anonymous". To
+# ## authenticate using a specific ID, select 'Certificate' or 'UserName'
+# # auth_method = "Anonymous"
+# #
+# ## Username. Required for auth_method = "UserName"
+# # username = ""
+# #
+# ## Password. Required for auth_method = "UserName"
+# # password = ""
+# #
+# ## Option to select the metric timestamp to use. 
Valid options are: +# ## "gather" -- uses the time of receiving the data in telegraf +# ## "server" -- uses the timestamp provided by the server +# ## "source" -- uses the timestamp provided by the source +# # timestamp = "gather" +# # +# ## Node ID configuration +# ## name - field name to use in the output +# ## namespace - OPC UA namespace of the node (integer value 0 thru 3) +# ## identifier_type - OPC UA ID type (s=string, i=numeric, g=guid, b=opaque) +# ## identifier - OPC UA ID (tag as shown in opcua browser) +# ## Example: +# ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262"} +# # nodes = [ +# # {name="", namespace="", identifier_type="", identifier=""}, +# # {name="", namespace="", identifier_type="", identifier=""}, +# #] +# # +# ## Node Group +# ## Sets defaults for OPC UA namespace and ID type so they aren't required in +# ## every node. A group can also have a metric name that overrides the main +# ## plugin metric name. +# ## +# ## Multiple node groups are allowed +# #[[inputs.opcua.group]] +# ## Group Metric name. Overrides the top level name. If unset, the +# ## top level name is used. +# # name = +# # +# ## Group default namespace. If a node in the group doesn't set its +# ## namespace, this is used. +# # namespace = +# # +# ## Group default identifier type. If a node in the group doesn't set its +# ## namespace, this is used. +# # identifier_type = +# # +# ## Node ID Configuration. Array of nodes with the same settings as above. +# # nodes = [ +# # {name="", namespace="", identifier_type="", identifier=""}, +# # {name="", namespace="", identifier_type="", identifier=""}, +# #] + + +# # OpenLDAP cn=Monitor plugin +# [[inputs.openldap]] +# host = "localhost" +# port = 389 +# +# # ldaps, starttls, or no encryption. default is an empty string, disabling all encryption. 
+# # note that port will likely need to be changed to 636 for ldaps
+# # valid options: "" | "starttls" | "ldaps"
+# tls = ""
+#
+# # skip peer certificate verification. Default is false.
+# insecure_skip_verify = false
+#
+# # Path to PEM-encoded Root certificate to use to verify server certificate
+# tls_ca = "/etc/ssl/certs.pem"
+#
+# # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed.
+# bind_dn = ""
+# bind_password = ""
+#
+# # Reverse metric names so they sort more naturally. Recommended.
+# # This defaults to false if unset, but is set to true when generating a new config
+# reverse_metric_names = true
+
+
+# # Get standard NTP query metrics from OpenNTPD.
+# [[inputs.openntpd]]
+# ## Run ntpctl binary with sudo.
+# # use_sudo = false
+#
+# ## Location of the ntpctl binary.
+# # binary = "/usr/sbin/ntpctl"
+#
+# ## Maximum time the ntpctl binary is allowed to run.
+# # timeout = "5ms"
+
+
+# # A plugin to collect stats from OpenSMTPD - a free implementation of the server-side SMTP protocol
+# [[inputs.opensmtpd]]
+# ## If running as a restricted user you can prepend sudo for additional access:
+# #use_sudo = false
+#
+# ## The default location of the smtpctl binary can be overridden with:
+# binary = "/usr/sbin/smtpctl"
+#
+# ## The default timeout of 1000ms can be overridden with (in milliseconds):
+# timeout = 1000
+
+
+# # Read current weather and forecasts data from openweathermap.org
+# [[inputs.openweathermap]]
+# ## OpenWeatherMap API key.
+# app_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+#
+# ## City ID's to collect weather data from.
+# city_id = ["5391959"]
+#
+# ## Language of the description field. Can be one of "ar", "bg",
+# ## "ca", "cz", "de", "el", "en", "fa", "fi", "fr", "gl", "hr", "hu",
+# ## "it", "ja", "kr", "la", "lt", "mk", "nl", "pl", "pt", "ro", "ru",
+# ## "se", "sk", "sl", "es", "tr", "ua", "vi", "zh_cn", "zh_tw"
+# # lang = "en"
+#
+# ## APIs to fetch; can contain "weather" or "forecast". 
+# fetch = ["weather", "forecast"] +# +# ## OpenWeatherMap base URL +# # base_url = "https://api.openweathermap.org/" +# +# ## Timeout for HTTP response. +# # response_timeout = "5s" +# +# ## Preferred unit system for temperature and wind speed. Can be one of +# ## "metric", "imperial", or "standard". +# # units = "metric" +# +# ## Query interval; OpenWeatherMap updates their weather data every 10 +# ## minutes. +# interval = "10m" + + +# # Read metrics of passenger using passenger-status +# [[inputs.passenger]] +# ## Path of passenger-status. +# ## +# ## Plugin gather metric via parsing XML output of passenger-status +# ## More information about the tool: +# ## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html +# ## +# ## If no path is specified, then the plugin simply execute passenger-status +# ## hopefully it can be found in your PATH +# command = "passenger-status -v --show=xml" + + +# # Gather counters from PF +# [[inputs.pf]] +# ## PF require root access on most systems. +# ## Setting 'use_sudo' to true will make use of sudo to run pfctl. +# ## Users must configure sudo to allow telegraf user to run pfctl with no password. +# ## pfctl can be restricted to only list command "pfctl -s info". +# use_sudo = false + + +# # Read metrics of phpfpm, via HTTP status page or socket +# [[inputs.phpfpm]] +# ## An array of addresses to gather stats about. 
Specify an ip or hostname +# ## with optional port and path +# ## +# ## Plugin can be configured in three modes (either can be used): +# ## - http: the URL must start with http:// or https://, ie: +# ## "http://localhost/status" +# ## "http://192.168.130.1/status?full" +# ## +# ## - unixsocket: path to fpm socket, ie: +# ## "/var/run/php5-fpm.sock" +# ## or using a custom fpm status path: +# ## "/var/run/php5-fpm.sock:fpm-custom-status-path" +# ## +# ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie: +# ## "fcgi://10.0.0.12:9000/status" +# ## "cgi://10.0.10.12:9001/status" +# ## +# ## Example of multiple gathering from local socket and remote host +# ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"] +# urls = ["http://localhost/status"] +# +# ## Duration allowed to complete HTTP requests. +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Ping given url(s) and return statistics +# [[inputs.ping]] +# ## Hosts to send ping packets to. +# urls = ["example.org"] +# +# ## Method used for sending pings, can be either "exec" or "native". When set +# ## to "exec" the systems ping command will be executed. When set to "native" +# ## the plugin will send pings directly. +# ## +# ## While the default is "exec" for backwards compatibility, new deployments +# ## are encouraged to use the "native" method for improved compatibility and +# ## performance. +# # method = "exec" +# +# ## Number of ping packets to send per interval. Corresponds to the "-c" +# ## option of the ping command. +# # count = 1 +# +# ## Time to wait between sending ping packets in seconds. Operates like the +# ## "-i" option of the ping command. +# # ping_interval = 1.0 +# +# ## If set, the time to wait for a ping response in seconds. 
Operates like +# ## the "-W" option of the ping command. +# # timeout = 1.0 +# +# ## If set, the total ping deadline, in seconds. Operates like the -w option +# ## of the ping command. +# # deadline = 10 +# +# ## Interface or source address to send ping from. Operates like the -I or -S +# ## option of the ping command. +# # interface = "" +# +# ## Percentiles to calculate. This only works with the native method. +# # percentiles = [50, 95, 99] +# +# ## Specify the ping executable binary. +# # binary = "ping" +# +# ## Arguments for ping command. When arguments is not empty, the command from +# ## the binary option will be used and other options (ping_interval, timeout, +# ## etc) will be ignored. +# # arguments = ["-c", "3"] +# +# ## Use only IPv6 addresses when resolving a hostname. +# # ipv6 = false +# +# ## Number of data bytes to be sent. Corresponds to the "-s" +# ## option of the ping command. This only works with the native method. +# # size = 56 + + +# # Read metrics from one or many PowerDNS servers +# [[inputs.powerdns]] +# ## An array of sockets to gather stats about. +# ## Specify a path to unix socket. +# unix_sockets = ["/var/run/pdns.controlsocket"] + + +# # Read metrics from one or many PowerDNS Recursor servers +# [[inputs.powerdns_recursor]] +# ## Path to the Recursor control socket. +# unix_sockets = ["/var/run/pdns_recursor.controlsocket"] +# +# ## Directory to create receive socket. This default is likely not writable, +# ## please reference the full plugin documentation for a recommended setup. +# # socket_dir = "/var/run/" +# ## Socket permissions for the receive socket. 
+# # socket_mode = "0666"
+
+
+# # Monitor process cpu and memory usage
+# [[inputs.procstat]]
+# ## PID file to monitor process
+# pid_file = "/var/run/nginx.pid"
+# ## executable name (ie, pgrep )
+# # exe = "nginx"
+# ## pattern as argument for pgrep (ie, pgrep -f )
+# # pattern = "nginx"
+# ## user as argument for pgrep (ie, pgrep -u )
+# # user = "nginx"
+# ## Systemd unit name, supports globs when include_systemd_children is set to true
+# # systemd_unit = "nginx.service"
+# # include_systemd_children = false
+# ## CGroup name or path, supports globs
+# # cgroup = "systemd/system.slice/nginx.service"
+#
+# ## Windows service name
+# # win_service = ""
+#
+# ## override for process_name
+# ## This is optional; default is sourced from /proc//status
+# # process_name = "bar"
+#
+# ## Field name prefix
+# # prefix = ""
+#
+# ## When true add the full cmdline as a tag.
+# # cmdline_tag = false
+#
+# ## Mode to use when calculating CPU usage. Can be one of 'solaris' or 'irix'.
+# # mode = "irix"
+#
+# ## Add the PID as a tag instead of as a field. When collecting multiple
+# ## processes with otherwise matching tags this setting should be enabled to
+# ## ensure each process has a unique identity.
+# ##
+# ## Enabling this option may result in a large number of series, especially
+# ## when processes have a short lifetime.
+# # pid_tag = false
+#
+# ## Method to use when finding process IDs. Can be one of 'pgrep', or
+# ## 'native'. The pgrep finder calls the pgrep executable in the PATH while
+# ## the native finder performs the search directly in a manner dependent on the
+# ## platform. Default is 'pgrep'
+# # pid_finder = "pgrep"
+
+
+# # Provides metrics from Proxmox nodes (Proxmox Virtual Environment > 6.2).
+# [[inputs.proxmox]]
+# ## API connection configuration. The API token was introduced in Proxmox v6.2. Required permissions for user and token: PVEAuditor role on /. 
+# base_url = "https://localhost:8006/api2/json" +# api_token = "USER@REALM!TOKENID=UUID" +# ## Node name, defaults to OS hostname +# # node_name = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# insecure_skip_verify = false +# +# # HTTP response timeout (default: 5s) +# response_timeout = "5s" + + +# # Reads last_run_summary.yaml file and converts to measurements +# [[inputs.puppetagent]] +# ## Location of puppet last run summary file +# location = "/var/lib/puppet/state/last_run_summary.yaml" + + +# # Reads metrics from RabbitMQ servers via the Management Plugin +# [[inputs.rabbitmq]] +# ## Management Plugin url. (default: http://localhost:15672) +# # url = "http://localhost:15672" +# ## Tag added to rabbitmq_overview series; deprecated: use tags +# # name = "rmq-server-1" +# ## Credentials +# # username = "guest" +# # password = "guest" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional request timeouts +# ## +# ## ResponseHeaderTimeout, if non-zero, specifies the amount of time to wait +# ## for a server's response headers after fully writing the request. +# # header_timeout = "3s" +# ## +# ## client_timeout specifies a time limit for requests made by this client. +# ## Includes connection time, any redirects, and reading the response body. +# # client_timeout = "4s" +# +# ## A list of nodes to gather as the rabbitmq_node measurement. If not +# ## specified, metrics for all nodes are gathered. +# # nodes = ["rabbit@node1", "rabbit@node2"] +# +# ## A list of queues to gather as the rabbitmq_queue measurement. If not +# ## specified, metrics for all queues are gathered. 
+# # queues = ["telegraf"] +# +# ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not +# ## specified, metrics for all exchanges are gathered. +# # exchanges = ["telegraf"] +# +# ## Metrics to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all metrics +# ## Currently the following metrics are supported: "exchange", "federation", "node", "overview", "queue" +# # metric_include = [] +# # metric_exclude = [] +# +# ## Queues to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all queues +# queue_name_include = [] +# queue_name_exclude = [] +# +# ## Federation upstreams include and exclude when gathering the rabbitmq_federation measurement. +# ## If neither are specified, metrics for all federation upstreams are gathered. +# ## Federation link metrics will only be gathered for queues and exchanges +# ## whose non-federation metrics will be collected (e.g a queue excluded +# ## by the 'queue_name_exclude' option will also be excluded from federation). +# ## Globs accepted. +# # federation_upstream_include = ["dataCentre-*"] +# # federation_upstream_exclude = [] + + +# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers) +# [[inputs.raindrops]] +# ## An array of raindrops middleware URI to gather stats. 
+# urls = ["http://localhost:8080/_raindrops"] + + +# # Reads metrics from RavenDB servers via the Monitoring Endpoints +# [[inputs.ravendb]] +# ## Node URL and port that RavenDB is listening on +# url = "https://localhost:8080" +# +# ## RavenDB X509 client certificate setup +# # tls_cert = "/etc/telegraf/raven.crt" +# # tls_key = "/etc/telegraf/raven.key" +# +# ## Optional request timeout +# ## +# ## Timeout, specifies the amount of time to wait +# ## for a server's response headers after fully writing the request and +# ## time limit for requests made by this client +# # timeout = "5s" +# +# ## List of statistics which are collected +# # At least one is required +# # Allowed values: server, databases, indexes, collections +# # +# # stats_include = ["server", "databases", "indexes", "collections"] +# +# ## List of db where database stats are collected +# ## If empty, all db are concerned +# # db_stats_dbs = [] +# +# ## List of db where index status are collected +# ## If empty, all indexes from all db are concerned +# # index_stats_dbs = [] +# +# ## List of db where collection status are collected +# ## If empty, all collections from all db are concerned +# # collection_stats_dbs = [] + + +# # Read CPU, Fans, Powersupply and Voltage metrics of hardware server through redfish APIs +# [[inputs.redfish]] +# ## Server url +# address = "https://127.0.0.1:5000" +# +# ## Username, Password for hardware server +# username = "root" +# password = "password123456" +# +# ## ComputerSystemId +# computer_system_id="2M220100SL" +# +# ## Amount of time allowed to complete the HTTP request +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from one or many redis servers +# [[inputs.redis]] +# ## specify servers via a url matching: +# ## 
[protocol://][:password]@address[:port]
+# ## e.g.
+# ## tcp://localhost:6379
+# ## tcp://:password@192.168.99.100
+# ## unix:///var/run/redis.sock
+# ##
+# ## If no servers are specified, then localhost is used as the host.
+# ## If no port is specified, 6379 is used
+# servers = ["tcp://localhost:6379"]
+#
+# ## Optional. Specify redis commands to retrieve values
+# # [[inputs.redis.commands]]
+# # # The command to run where each argument is a separate element
+# # command = ["get", "sample-key"]
+# # # The field to store the result in
+# # field = "sample-key-value"
+# # # The type of the result
+# # # Can be "string", "integer", or "float"
+# # type = "string"
+#
+# ## specify server password
+# # password = "s#cr@t%"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = true
+
+
+# # Read metrics from one or many RethinkDB servers
+# [[inputs.rethinkdb]]
+# ## An array of URI to gather stats about. Specify an ip or hostname
+# ## with optional port and password. ie,
+# ## rethinkdb://user:auth_key@10.10.3.30:28105,
+# ## rethinkdb://10.10.3.33:18832,
+# ## 10.0.0.1:10000, etc.
+# servers = ["127.0.0.1:28015"]
+# ##
+# ## If you use actual rethinkdb of > 2.3.0 with username/password authorization,
+# ## protocol have to be named "rethinkdb2" - it will use 1_0 H.
+# # servers = ["rethinkdb2://username:password@127.0.0.1:28015"]
+# ##
+# ## If you use older versions of rethinkdb (<2.2) with auth_key, protocol
+# ## have to be named "rethinkdb". 
+# # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"] + + +# # Read metrics one or many Riak servers +# [[inputs.riak]] +# # Specify a list of one or more riak http servers +# servers = ["http://localhost:8098"] + + +# # Read API usage and limits for a Salesforce organisation +# [[inputs.salesforce]] +# ## specify your credentials +# ## +# username = "your_username" +# password = "your_password" +# ## +# ## (optional) security token +# # security_token = "your_security_token" +# ## +# ## (optional) environment type (sandbox or production) +# ## default is: production +# ## +# # environment = "production" +# ## +# ## (optional) API version (default: "39.0") +# ## +# # version = "39.0" + + +# # Read metrics from storage devices supporting S.M.A.R.T. +# [[inputs.smart]] +# ## Optionally specify the path to the smartctl executable +# # path_smartctl = "/usr/bin/smartctl" +# +# ## Optionally specify the path to the nvme-cli executable +# # path_nvme = "/usr/bin/nvme" +# +# ## Optionally specify if vendor specific attributes should be propagated for NVMe disk case +# ## ["auto-on"] - automatically find and enable additional vendor specific disk info +# ## ["vendor1", "vendor2", ...] - e.g. "Intel" enable additional Intel specific disk info +# # enable_extensions = ["auto-on"] +# +# ## On most platforms used cli utilities requires root access. +# ## Setting 'use_sudo' to true will make use of sudo to run smartctl or nvme-cli. +# ## Sudo must be configured to allow the telegraf user to run smartctl or nvme-cli +# ## without a password. +# # use_sudo = false +# +# ## Skip checking disks in this power mode. Defaults to +# ## "standby" to not wake up disks that have stopped rotating. +# ## See --nocheck in the man pages for smartctl. +# ## smartctl version 5.41 and 5.42 have faulty detection of +# ## power mode and might require changing this value to +# ## "never" depending on your disks. +# # nocheck = "standby" +# +# ## Gather all returned S.M.A.R.T. 
attribute metrics and the detailed +# ## information from each drive into the 'smart_attribute' measurement. +# # attributes = false +# +# ## Optionally specify devices to exclude from reporting if disks auto-discovery is performed. +# # excludes = [ "/dev/pass6" ] +# +# ## Optionally specify devices and device type, if unset +# ## a scan (smartctl --scan and smartctl --scan -d nvme) for S.M.A.R.T. devices will be done +# ## and all found will be included except for the excluded in excludes. +# # devices = [ "/dev/ada0 -d atacam", "/dev/nvme0"] +# +# ## Timeout for the cli command to complete. +# # timeout = "30s" + + +# # Retrieves SNMP values from remote agents +# [[inputs.snmp]] +# ## Agent addresses to retrieve values from. +# ## format: agents = [":"] +# ## scheme: optional, either udp, udp4, udp6, tcp, tcp4, tcp6. +# ## default is udp +# ## port: optional +# ## example: agents = ["udp://127.0.0.1:161"] +# ## agents = ["tcp://127.0.0.1:161"] +# ## agents = ["udp4://v4only-snmp-agent"] +# agents = ["udp://127.0.0.1:161"] +# +# ## Timeout for each request. +# # timeout = "5s" +# +# ## SNMP version; can be 1, 2, or 3. +# # version = 2 +# +# ## Agent host tag; the tag used to reference the source host +# # agent_host_tag = "agent_host" +# +# ## SNMP community string. +# # community = "public" +# +# ## Number of retries to attempt. +# # retries = 3 +# +# ## The GETBULK max-repetitions parameter. +# # max_repetitions = 10 +# +# ## SNMPv3 authentication and encryption options. +# ## +# ## Security Name. +# # sec_name = "myuser" +# ## Authentication protocol; one of "MD5", "SHA", "SHA224", "SHA256", "SHA384", "SHA512" or "". +# # auth_protocol = "MD5" +# ## Authentication password. +# # auth_password = "pass" +# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". +# # sec_level = "authNoPriv" +# ## Context Name. +# # context_name = "" +# ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "". 
+# # priv_protocol = "" +# ## Privacy password used for encrypted messages. +# # priv_password = "" +# +# ## Add fields and tables defining the variables you wish to collect. This +# ## example collects the system uptime and interface variables. Reference the +# ## full plugin documentation for configuration details. + + +# # DEPRECATED! PLEASE USE inputs.snmp INSTEAD. +# [[inputs.snmp_legacy]] +# ## Use 'oids.txt' file to translate oids to names +# ## To generate 'oids.txt' you need to run: +# ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt +# ## Or if you have an other MIB folder with custom MIBs +# ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt +# snmptranslate_file = "/tmp/oids.txt" +# [[inputs.snmp.host]] +# address = "192.168.2.2:161" +# # SNMP community +# community = "public" # default public +# # SNMP version (1, 2 or 3) +# # Version 3 not supported yet +# version = 2 # default 2 +# # SNMP response timeout +# timeout = 2.0 # default 2.0 +# # SNMP request retries +# retries = 2 # default 2 +# # Which get/bulk do you want to collect for this host +# collect = ["mybulk", "sysservices", "sysdescr"] +# # Simple list of OIDs to get, in addition to "collect" +# get_oids = [] +# +# [[inputs.snmp.host]] +# address = "192.168.2.3:161" +# community = "public" +# version = 2 +# timeout = 2.0 +# retries = 2 +# collect = ["mybulk"] +# get_oids = [ +# "ifNumber", +# ".1.3.6.1.2.1.1.3.0", +# ] +# +# [[inputs.snmp.get]] +# name = "ifnumber" +# oid = "ifNumber" +# +# [[inputs.snmp.get]] +# name = "interface_speed" +# oid = "ifSpeed" +# instance = "0" +# +# [[inputs.snmp.get]] +# name = "sysuptime" +# oid = ".1.3.6.1.2.1.1.3.0" +# unit = "second" +# +# [[inputs.snmp.bulk]] +# name = "mybulk" +# max_repetition = 127 +# oid = ".1.3.6.1.2.1.1" +# +# [[inputs.snmp.bulk]] +# name = "ifoutoctets" +# max_repetition = 127 +# oid = "ifOutOctets" +# +# [[inputs.snmp.host]] +# address = "192.168.2.13:161" +# #address = 
"127.0.0.1:161" +# community = "public" +# version = 2 +# timeout = 2.0 +# retries = 2 +# #collect = ["mybulk", "sysservices", "sysdescr", "systype"] +# collect = ["sysuptime" ] +# [[inputs.snmp.host.table]] +# name = "iftable3" +# include_instances = ["enp5s0", "eth1"] +# +# # SNMP TABLEs +# # table without mapping neither subtables +# [[inputs.snmp.table]] +# name = "iftable1" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# +# # table without mapping but with subtables +# [[inputs.snmp.table]] +# name = "iftable2" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# sub_tables = [".1.3.6.1.2.1.2.2.1.13"] +# +# # table with mapping but without subtables +# [[inputs.snmp.table]] +# name = "iftable3" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# # if empty. get all instances +# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" +# # if empty, get all subtables +# +# # table with both mapping and subtables +# [[inputs.snmp.table]] +# name = "iftable4" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# # if empty get all instances +# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" +# # if empty get all subtables +# # sub_tables could be not "real subtables" +# sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"] + + +# # Read stats from one or more Solr servers or cores +# [[inputs.solr]] +# ## specify a list of one or more Solr servers +# servers = ["http://localhost:8983"] +# +# ## specify a list of one or more Solr cores (default - all) +# # cores = ["main"] +# +# ## Optional HTTP Basic Auth Credentials +# # username = "username" +# # password = "pa$$word" + + +# # Gather timeseries from Google Cloud Platform v3 monitoring API +# [[inputs.stackdriver]] +# ## GCP Project +# project = "erudite-bloom-151019" +# +# ## Include timeseries that start with the given metric type. +# metric_type_prefix_include = [ +# "compute.googleapis.com/", +# ] +# +# ## Exclude timeseries that start with the given metric type. 
+# # metric_type_prefix_exclude = []
+#
+# ## Many metrics are updated once per minute; it is recommended to override
+# ## the agent level interval with a value of 1m or greater.
+# interval = "1m"
+#
+# ## Maximum number of API calls to make per second. The quota for accounts
+# ## varies, it can be viewed on the API dashboard:
+# ## https://cloud.google.com/monitoring/quotas#quotas_and_limits
+# # rate_limit = 14
+#
+# ## The delay and window options control the number of points selected on
+# ## each gather. When set, metrics are gathered between:
+# ## start: now() - delay - window
+# ## end: now() - delay
+# #
+# ## Collection delay; if set too low metrics may not yet be available.
+# # delay = "5m"
+# #
+# ## If unset, the window will start at 1m and be updated dynamically to span
+# ## the time between calls (approximately the length of the plugin interval).
+# # window = "1m"
+#
+# ## TTL for cached list of metric types. This is the maximum amount of time
+# ## it may take to discover new metrics.
+# # cache_ttl = "1h"
+#
+# ## If true, raw bucket counts are collected for distribution value types.
+# ## For a more lightweight collection, you may wish to disable and use
+# ## distribution_aggregation_aligners instead.
+# # gather_raw_distribution_buckets = true
+#
+# ## Aggregate functions to be used for metrics whose value type is
+# ## distribution. These aggregate values are recorded in addition to raw
+# ## bucket counts, if they are enabled.
+# ##
+# ## For a list of aligner strings see:
+# ## https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#aligner
+# # distribution_aggregation_aligners = [
+# # "ALIGN_PERCENTILE_99",
+# # "ALIGN_PERCENTILE_95",
+# # "ALIGN_PERCENTILE_50",
+# # ]
+#
+# ## Filters can be added to reduce the number of time series matched. All
+# ## functions are supported: starts_with, ends_with, has_substring, and
+# ## one_of. Only the '=' operator is supported. 
+# ## +# ## The logical operators when combining filters are defined statically using +# ## the following values: +# ## filter ::= {AND } +# ## resource_labels ::= {OR } +# ## metric_labels ::= {OR } +# ## +# ## For more details, see https://cloud.google.com/monitoring/api/v3/filters +# # +# ## Resource labels refine the time series selection with the following expression: +# ## resource.labels. = +# # [[inputs.stackdriver.filter.resource_labels]] +# # key = "instance_name" +# # value = 'starts_with("localhost")' +# # +# ## Metric labels refine the time series selection with the following expression: +# ## metric.labels. = +# # [[inputs.stackdriver.filter.metric_labels]] +# # key = "device_name" +# # value = 'one_of("sda", "sdb")' + + +# # Get synproxy counter statistics from procfs +# [[inputs.synproxy]] +# # no configuration + + +# # Reads metrics from a Teamspeak 3 Server via ServerQuery +# [[inputs.teamspeak]] +# ## Server address for Teamspeak 3 ServerQuery +# # server = "127.0.0.1:10011" +# ## Username for ServerQuery +# username = "serverqueryuser" +# ## Password for ServerQuery +# password = "secret" +# ## Array of virtual servers +# # virtual_servers = [1] + + +# # Read metrics about temperature +# [[inputs.temp]] +# # no configuration + + +# # Read Tengine's basic status information (ngx_http_reqstat_module) +# [[inputs.tengine]] +# # An array of Tengine reqstat module URI to gather stats. +# urls = ["http://127.0.0.1/us"] +# +# # HTTP response timeout (default: 5s) +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.cer" +# # tls_key = "/etc/telegraf/key.key" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Gather metrics from the Tomcat server status page. 
+# [[inputs.tomcat]] +# ## URL of the Tomcat server status +# # url = "http://127.0.0.1:8080/manager/status/all?XML=true" +# +# ## HTTP Basic Auth Credentials +# # username = "tomcat" +# # password = "s3cret" +# +# ## Request timeout +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Inserts sine and cosine waves for demonstration purposes +# [[inputs.trig]] +# ## Set the amplitude +# amplitude = 10.0 + + +# # Read Twemproxy stats data +# [[inputs.twemproxy]] +# ## Twemproxy stats address and port (no scheme) +# addr = "localhost:22222" +# ## Monitor pool name +# pools = ["redis_pool", "mc_pool"] + + +# # A plugin to collect stats from the Unbound DNS resolver +# [[inputs.unbound]] +# ## Address of server to connect to, read from unbound conf default, optionally ':port' +# ## Will lookup IP if given a hostname +# server = "127.0.0.1:8953" +# +# ## If running as a restricted user you can prepend sudo for additional access: +# # use_sudo = false +# +# ## The default location of the unbound-control binary can be overridden with: +# # binary = "/usr/sbin/unbound-control" +# +# ## The default location of the unbound config file can be overridden with: +# # config_file = "/etc/unbound/unbound.conf" +# +# ## The default timeout of 1s can be overridden with: +# # timeout = "1s" +# +# ## When set to true, thread metrics are tagged with the thread id. +# ## +# ## The default is false for backwards compatibility, and will be changed to +# ## true in a future version. It is recommended to set to true on new +# ## deployments. +# thread_as_tag = false + + +# # Read uWSGI metrics. +# [[inputs.uwsgi]] +# ## List with urls of uWSGI Stats servers. 
URL must match pattern: +# ## scheme://address[:port] +# ## +# ## For example: +# ## servers = ["tcp://localhost:5050", "http://localhost:1717", "unix:///tmp/statsock"] +# servers = ["tcp://127.0.0.1:1717"] +# +# ## General connection timeout +# # timeout = "5s" + + +# # Input plugin to collect Windows Event Log messages +# [[inputs.win_eventlog]] +# ## Telegraf should have Administrator permissions to subscribe for some Windows Events channels +# ## (System log, for example) +# +# ## LCID (Locale ID) for event rendering +# ## 1033 to force English language +# ## 0 to use default Windows locale +# # locale = 0 +# +# ## Name of eventlog, used only if xpath_query is empty +# ## Example: "Application" +# # eventlog_name = "" +# +# ## xpath_query can be in defined short form like "Event/System[EventID=999]" +# ## or you can form a XML Query. Refer to the Consuming Events article: +# ## https://docs.microsoft.com/en-us/windows/win32/wes/consuming-events +# ## XML query is the recommended form, because it is most flexible +# ## You can create or debug XML Query by creating Custom View in Windows Event Viewer +# ## and then copying resulting XML here +# xpath_query = ''' +# +# +# +# *[System[( (EventID >= 5152 and EventID <= 5158) or EventID=5379 or EventID=4672)]] +# +# +# +# +# +# +# +# +# +# +# +# +# +# +# ''' +# +# ## System field names: +# ## "Source", "EventID", "Version", "Level", "Task", "Opcode", "Keywords", "TimeCreated", +# ## "EventRecordID", "ActivityID", "RelatedActivityID", "ProcessID", "ThreadID", "ProcessName", +# ## "Channel", "Computer", "UserID", "UserName", "Message", "LevelText", "TaskText", "OpcodeText" +# +# ## In addition to System, Data fields can be unrolled from additional XML nodes in event. 
+# ## Human-readable representation of those nodes is formatted into event Message field, +# ## but XML is more machine-parsable +# +# # Process UserData XML to fields, if this node exists in Event XML +# process_userdata = true +# +# # Process EventData XML to fields, if this node exists in Event XML +# process_eventdata = true +# +# ## Separator character to use for unrolled XML Data field names +# separator = "_" +# +# ## Get only first line of Message field. For most events first line is usually more than enough +# only_first_line_of_message = true +# +# ## Parse timestamp from TimeCreated.SystemTime event field. +# ## Will default to current time of telegraf processing on parsing error or if set to false +# timestamp_from_event = true +# +# ## Fields to include as tags. Globbing supported ("Level*" for both "Level" and "LevelText") +# event_tags = ["Source", "EventID", "Level", "LevelText", "Task", "TaskText", "Opcode", "OpcodeText", "Keywords", "Channel", "Computer"] +# +# ## Default list of fields to send. All fields are sent by default. Globbing supported +# event_fields = ["*"] +# +# ## Fields to exclude. Also applied to data fields. Globbing supported +# exclude_fields = ["TimeCreated", "Binary", "Data_Address*"] +# +# ## Skip those tags or fields if their value is empty or equals to zero. Globbing supported +# exclude_empty = ["*ActivityID", "UserID"] + + +# # Input plugin to counterPath Performance Counters on Windows operating systems +# [[inputs.win_perf_counters]] +# ## By default this plugin returns basic CPU and Disk statistics. +# ## See the README file for more examples. +# ## Uncomment examples below or write your own as you see fit. If the system +# ## being polled for data does not have the Object at startup of the Telegraf +# ## agent, it will not be gathered. 
+# ## Settings: +# # PrintValid = false # Print All matching performance counters +# # Whether request a timestamp along with the PerfCounter data or just use current time +# # UsePerfCounterTime=true +# # If UseWildcardsExpansion params is set to true, wildcards (partial wildcards in instance names and wildcards in counters names) in configured counter paths will be expanded +# # and in case of localized Windows, counter paths will be also localized. It also returns instance indexes in instance names. +# # If false, wildcards (not partial) in instance names will still be expanded, but instance indexes will not be returned in instance names. +# #UseWildcardsExpansion = false +# # Period after which counters will be reread from configuration and wildcards in counter paths expanded +# CountersRefreshInterval="1m" +# +# [[inputs.win_perf_counters.object]] +# # Processor usage, alternative to native, reports on a per core. +# ObjectName = "Processor" +# Instances = ["*"] +# Counters = [ +# "% Idle Time", +# "% Interrupt Time", +# "% Privileged Time", +# "% User Time", +# "% Processor Time", +# "% DPC Time", +# ] +# Measurement = "win_cpu" +# # Set to true to include _Total instance when querying for all (*). +# # IncludeTotal=false +# # Print out when the performance counter is missing from object, counter or instance. 
+# # WarnOnMissing = false +# +# [[inputs.win_perf_counters.object]] +# # Disk times and queues +# ObjectName = "LogicalDisk" +# Instances = ["*"] +# Counters = [ +# "% Idle Time", +# "% Disk Time", +# "% Disk Read Time", +# "% Disk Write Time", +# "% User Time", +# "% Free Space", +# "Current Disk Queue Length", +# "Free Megabytes", +# ] +# Measurement = "win_disk" +# +# [[inputs.win_perf_counters.object]] +# ObjectName = "PhysicalDisk" +# Instances = ["*"] +# Counters = [ +# "Disk Read Bytes/sec", +# "Disk Write Bytes/sec", +# "Current Disk Queue Length", +# "Disk Reads/sec", +# "Disk Writes/sec", +# "% Disk Time", +# "% Disk Read Time", +# "% Disk Write Time", +# ] +# Measurement = "win_diskio" +# +# [[inputs.win_perf_counters.object]] +# ObjectName = "Network Interface" +# Instances = ["*"] +# Counters = [ +# "Bytes Received/sec", +# "Bytes Sent/sec", +# "Packets Received/sec", +# "Packets Sent/sec", +# "Packets Received Discarded", +# "Packets Outbound Discarded", +# "Packets Received Errors", +# "Packets Outbound Errors", +# ] +# Measurement = "win_net" +# +# +# [[inputs.win_perf_counters.object]] +# ObjectName = "System" +# Counters = [ +# "Context Switches/sec", +# "System Calls/sec", +# "Processor Queue Length", +# "System Up Time", +# ] +# Instances = ["------"] +# Measurement = "win_system" +# +# [[inputs.win_perf_counters.object]] +# # Example counterPath where the Instance portion must be removed to get data back, +# # such as from the Memory object. +# ObjectName = "Memory" +# Counters = [ +# "Available Bytes", +# "Cache Faults/sec", +# "Demand Zero Faults/sec", +# "Page Faults/sec", +# "Pages/sec", +# "Transition Faults/sec", +# "Pool Nonpaged Bytes", +# "Pool Paged Bytes", +# "Standby Cache Reserve Bytes", +# "Standby Cache Normal Priority Bytes", +# "Standby Cache Core Bytes", +# ] +# Instances = ["------"] # Use 6 x - to remove the Instance bit from the counterPath. 
+# Measurement = "win_mem" +# +# [[inputs.win_perf_counters.object]] +# # Example query where the Instance portion must be removed to get data back, +# # such as from the Paging File object. +# ObjectName = "Paging File" +# Counters = [ +# "% Usage", +# ] +# Instances = ["_Total"] +# Measurement = "win_swap" + + +# # Input plugin to report Windows services info. +# [[inputs.win_services]] +# ## Names of the services to monitor. Leave empty to monitor all the available services on the host. Globs accepted. +# service_names = [ +# "LanmanServer", +# "TermService", +# "Win*", +# ] + + +# # Collect Wireguard server interface and peer statistics +# [[inputs.wireguard]] +# ## Optional list of Wireguard device/interface names to query. +# ## If omitted, all Wireguard interfaces are queried. +# # devices = ["wg0"] + + +# # Monitor wifi signal strength and quality +# [[inputs.wireless]] +# ## Sets 'proc' directory path +# ## If not specified, then default is /proc +# # host_proc = "/proc" + + +# # Reads metrics from a SSL certificate +# [[inputs.x509_cert]] +# ## List certificate sources +# ## Prefix your entry with 'file://' if you intend to use relative paths +# sources = ["tcp://example.org:443", "https://influxdata.com:443", +# "udp://127.0.0.1:4433", "/etc/ssl/certs/ssl-cert-snakeoil.pem", +# "/etc/mycerts/*.mydomain.org.pem", "file:///path/to/*.pem"] +# +# ## Timeout for SSL connection +# # timeout = "5s" +# +# ## Pass a different name into the TLS request (Server Name Indication) +# ## example: server_name = "myhost.example.org" +# # server_name = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" + + +# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, pools and datasets +# [[inputs.zfs]] +# ## ZFS kstat path. 
Ignored on FreeBSD +# ## If not specified, then default is: +# # kstatPath = "/proc/spl/kstat/zfs" +# +# ## By default, telegraf gather all zfs stats +# ## If not specified, then default is: +# # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"] +# ## For Linux, the default is: +# # kstatMetrics = ["abdstats", "arcstats", "dnodestats", "dbufcachestats", +# # "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"] +# ## By default, don't gather zpool stats +# # poolMetrics = false +# ## By default, don't gather zdataset stats +# # datasetMetrics = false + + +# # Reads 'mntr' stats from one or many zookeeper servers +# [[inputs.zookeeper]] +# ## An array of address to gather stats about. Specify an ip or hostname +# ## with port. ie localhost:2181, 10.0.0.1:2181, etc. +# +# ## If no servers are specified, then localhost is used as the host. +# ## If no port is specified, 2181 is used +# servers = [":2181"] +# +# ## Timeout for metric collections from all servers. Minimum timeout is "1s". +# # timeout = "5s" +# +# ## Optional TLS Config +# # enable_tls = true +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## If false, skip chain & host verification +# # insecure_skip_verify = true + + +############################################################################### +# SERVICE INPUT PLUGINS # +############################################################################### + + +# # Listener capable of handling KNX bus messages provided through a KNX-IP Interface. +# [[inputs.KNXListener]] +# ## Type of KNX-IP interface. +# ## Can be either "tunnel" or "router". +# # service_type = "tunnel" +# +# ## Address of the KNX-IP interface. 
+# service_address = "localhost:3671" +# +# ## Measurement definition(s) +# # [[inputs.knx_listener.measurement]] +# # ## Name of the measurement +# # name = "temperature" +# # ## Datapoint-Type (DPT) of the KNX messages +# # dpt = "9.001" +# # ## List of Group-Addresses (GAs) assigned to the measurement +# # addresses = ["5/5/1"] +# +# # [[inputs.knx_listener.measurement]] +# # name = "illumination" +# # dpt = "9.004" +# # addresses = ["5/5/3"] + + +# # Pull Metric Statistics from Aliyun CMS +# [[inputs.aliyuncms]] +# ## Aliyun Credentials +# ## Credentials are loaded in the following order +# ## 1) Ram RoleArn credential +# ## 2) AccessKey STS token credential +# ## 3) AccessKey credential +# ## 4) Ecs Ram Role credential +# ## 5) RSA keypair credential +# ## 6) Environment variables credential +# ## 7) Instance metadata credential +# +# # access_key_id = "" +# # access_key_secret = "" +# # access_key_sts_token = "" +# # role_arn = "" +# # role_session_name = "" +# # private_key = "" +# # public_key_id = "" +# # role_name = "" +# +# ## Specify the ali cloud region list to be queried for metrics and objects discovery +# ## If not set, all supported regions (see below) would be covered, it can provide a significant load on API, so the recommendation here +# ## is to limit the list as much as possible. Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm +# ## Default supported regions are: +# ## 21 items: cn-qingdao,cn-beijing,cn-zhangjiakou,cn-huhehaote,cn-hangzhou,cn-shanghai,cn-shenzhen, +# ## cn-heyuan,cn-chengdu,cn-hongkong,ap-southeast-1,ap-southeast-2,ap-southeast-3,ap-southeast-5, +# ## ap-south-1,ap-northeast-1,us-west-1,us-east-1,eu-central-1,eu-west-1,me-east-1 +# ## +# ## From discovery perspective it set the scope for object discovery, the discovered info can be used to enrich +# ## the metrics with objects attributes/tags. 
Discovery is supported not for all projects (if not supported, then +# ## it will be reported on the start - for example for 'acs_cdn' project: +# ## 'E! [inputs.aliyuncms] Discovery tool is not activated: no discovery support for project "acs_cdn"' ) +# ## Currently, discovery supported for the following projects: +# ## - acs_ecs_dashboard +# ## - acs_rds_dashboard +# ## - acs_slb_dashboard +# ## - acs_vpc_eip +# regions = ["cn-hongkong"] +# +# # The minimum period for AliyunCMS metrics is 1 minute (60s). However not all +# # metrics are made available to the 1 minute period. Some are collected at +# # 3 minute, 5 minute, or larger intervals. +# # See: https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.2.18.2bc1750eeOw1Pv +# # Note that if a period is configured that is smaller than the minimum for a +# # particular metric, that metric will not be returned by the Aliyun OpenAPI +# # and will not be collected by Telegraf. +# # +# ## Requested AliyunCMS aggregation Period (required - must be a multiple of 60s) +# period = "5m" +# +# ## Collection Delay (required - must account for metrics availability via AliyunCMS API) +# delay = "1m" +# +# ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid +# ## gaps or overlap in pulled data +# interval = "5m" +# +# ## Metric Statistic Project (required) +# project = "acs_slb_dashboard" +# +# ## Maximum requests per second, default value is 200 +# ratelimit = 200 +# +# ## How often the discovery API call executed (default 1m) +# #discovery_interval = "1m" +# +# ## Metrics to Pull (Required) +# [[inputs.aliyuncms.metrics]] +# ## Metrics names to be requested, +# ## described here (per project): https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq +# names = ["InstanceActiveConnection", "InstanceNewConnection"] +# +# ## Dimension filters for Metric (these are optional). +# ## This allows to get additional metric dimension. 
If dimension is not specified it can be returned or
+# ## the data can be aggregated - it depends on particular metric, you can find details here: https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
+# ##
+# ## Note, that by default dimension filter includes the list of discovered objects in scope (if discovery is enabled)
+# ## Values specified here would be added into the list of discovered objects.
+# ## You can specify either single dimension:
+# #dimensions = '{"instanceId": "p-example"}'
+#
+# ## Or you can specify several dimensions at once:
+# #dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]'
+#
+# ## Enrichment tags, can be added from discovery (if supported)
+# ## Notation is <measurement_tag_name>:<JMES query path>
+# ## To figure out which fields are available, consult the Describe API per project.
+# ## For example, for SLB: https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers&params={}&tab=MOCK&lang=GO
+# #tag_query_path = [
+# # "address:Address",
+# # "name:LoadBalancerName",
+# # "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]"
+# # ]
+# ## The following tags added by default: regionId (if discovery enabled), userId, instanceId.
+#
+# ## Allow metrics without discovery data, if discovery is enabled. If set to true, then metric without discovery
+# ## data would be emitted, otherwise dropped. This can be of help, in case debugging dimension filters, or partial coverage
+# ## of discovery scope vs monitoring scope
+# #allow_dps_without_discovery = false
+
+
+# # AMQP consumer plugin
+# [[inputs.amqp_consumer]]
+# ## Broker to consume from.
+# ## deprecated in 1.7; use the brokers option
+# # url = "amqp://localhost:5672/influxdb"
+#
+# ## Brokers to consume from. If multiple brokers are specified a random broker
+# ## will be selected anytime a connection is established. This can be
+# ## helpful for load balancing when not using a dedicated load balancer.
+# brokers = ["amqp://localhost:5672/influxdb"] +# +# ## Authentication credentials for the PLAIN auth_method. +# # username = "" +# # password = "" +# +# ## Name of the exchange to declare. If unset, no exchange will be declared. +# exchange = "telegraf" +# +# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash". +# # exchange_type = "topic" +# +# ## If true, exchange will be passively declared. +# # exchange_passive = false +# +# ## Exchange durability can be either "transient" or "durable". +# # exchange_durability = "durable" +# +# ## Additional exchange arguments. +# # exchange_arguments = { } +# # exchange_arguments = {"hash_property" = "timestamp"} +# +# ## AMQP queue name. +# queue = "telegraf" +# +# ## AMQP queue durability can be "transient" or "durable". +# queue_durability = "durable" +# +# ## If true, queue will be passively declared. +# # queue_passive = false +# +# ## A binding between the exchange and queue using this binding key is +# ## created. If unset, no binding is created. +# binding_key = "#" +# +# ## Maximum number of messages server should give to the worker. +# # prefetch_count = 50 +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Auth method. 
PLAIN and EXTERNAL are supported +# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as +# ## described here: https://www.rabbitmq.com/plugins.html +# # auth_method = "PLAIN" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Content encoding for message payloads, can be set to "gzip" to or +# ## "identity" to apply no encoding. +# # content_encoding = "identity" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read Cassandra metrics through Jolokia +# [[inputs.cassandra]] +# ## DEPRECATED: The cassandra plugin has been deprecated. Please use the +# ## jolokia2 plugin instead. +# ## +# ## see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2 +# +# context = "/jolokia/read" +# ## List of cassandra servers exposing jolokia read service +# servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"] +# ## List of metrics collected on above servers +# ## Each metric consists of a jmx path. +# ## This will collect all heap memory usage metrics from the jvm and +# ## ReadLatency metrics for all keyspaces and tables. +# ## "type=Table" in the query works with Cassandra3.0. Older versions might +# ## need to use "type=ColumnFamily" +# metrics = [ +# "/java.lang:type=Memory/HeapMemoryUsage", +# "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency" +# ] + + +# # Cisco model-driven telemetry (MDT) input plugin for IOS XR, IOS XE and NX-OS platforms +# [[inputs.cisco_telemetry_mdt]] +# ## Telemetry transport can be "tcp" or "grpc". TLS is only supported when +# ## using the grpc transport. 
+# transport = "grpc" +# +# ## Address and port to host telemetry listener +# service_address = ":57000" +# +# ## Enable TLS; grpc transport only. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Enable TLS client authentication and define allowed CA certificates; grpc +# ## transport only. +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Define (for certain nested telemetry measurements with embedded tags) which fields are tags +# # embedded_tags = ["Cisco-IOS-XR-qos-ma-oper:qos/interface-table/interface/input/service-policy-names/service-policy-instance/statistics/class-stats/class-name"] +# +# ## Define aliases to map telemetry encoding paths to simple measurement names +# [inputs.cisco_telemetry_mdt.aliases] +# ifstats = "ietf-interfaces:interfaces-state/interface/statistics" +# ##Define Property Xformation, please refer README and https://pubhub.devnetcloud.com/media/dme-docs-9-3-3/docs/appendix/ for Model details. +# [inputs.cisco_telemetry_mdt.dmes] +# ModTs = "ignore" +# CreateTs = "ignore" + + +# # Read metrics from one or many ClickHouse servers +# [[inputs.clickhouse]] +# ## Username for authorization on ClickHouse server +# ## example: username = "default" +# username = "default" +# +# ## Password for authorization on ClickHouse server +# ## example: password = "super_secret" +# +# ## HTTP(s) timeout while getting metrics values +# ## The timeout includes connection time, any redirects, and reading the response body. 
+# ## example: timeout = 1s
+# # timeout = 5s
+#
+# ## List of servers for metrics scraping
+# ## metrics scrape via HTTP(s) clickhouse interface
+# ## https://clickhouse.tech/docs/en/interfaces/http/
+# ## example: servers = ["http://127.0.0.1:8123","https://custom-server.mdb.yandexcloud.net"]
+# servers = ["http://127.0.0.1:8123"]
+#
+# ## If "auto_discovery" is "true" plugin tries to connect to all servers available in the cluster
+# ## with using same "user:password" described in "user" and "password" parameters
+# ## and get this server hostname list from "system.clusters" table
+# ## see
+# ## - https://clickhouse.tech/docs/en/operations/system_tables/#system-clusters
+# ## - https://clickhouse.tech/docs/en/operations/server_settings/settings/#server_settings_remote_servers
+# ## - https://clickhouse.tech/docs/en/operations/table_engines/distributed/
+# ## - https://clickhouse.tech/docs/en/operations/table_engines/replication/#creating-replicated-tables
+# ## example: auto_discovery = false
+# # auto_discovery = true
+#
+# ## Filter cluster names in "system.clusters" when "auto_discovery" is "true"
+# ## when this filter present then "WHERE cluster IN (...)" filter will apply
+# ## please use only full cluster names here, regexp and glob filters are not allowed
+# ## for "/etc/clickhouse-server/config.d/remote.xml"
+# ## <yandex>
+# ##  <remote_servers>
+# ##   <my-own-cluster>
+# ##    <shard>
+# ##     <replica><host>clickhouse-ru-1.local</host><port>9000</port></replica>
+# ##     <replica><host>clickhouse-ru-2.local</host><port>9000</port></replica>
+# ##    </shard>
+# ##    <shard>
+# ##     <replica><host>clickhouse-eu-1.local</host><port>9000</port></replica>
+# ##     <replica><host>clickhouse-eu-2.local</host><port>9000</port></replica>
+# ##    </shard>
+# ##   </my-own-cluster>
+# ##  </remote_servers>
+# ## </yandex>
+# ##
+# ## example: cluster_include = ["my-own-cluster"]
+# # cluster_include = []
+#
+# ## Filter cluster names in "system.clusters" when "auto_discovery" is "true"
+# ## when this filter present then "WHERE cluster NOT IN (...)" filter will apply
+# ## example: cluster_exclude = ["my-internal-not-discovered-cluster"]
+# # cluster_exclude = []
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# #
tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from Google PubSub +# [[inputs.cloud_pubsub]] +# ## Required. Name of Google Cloud Platform (GCP) Project that owns +# ## the given PubSub subscription. +# project = "my-project" +# +# ## Required. Name of PubSub subscription to ingest metrics from. +# subscription = "my-subscription" +# +# ## Required. Data format to consume. +# ## Each data format has its own unique set of configuration options. +# ## Read more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Optional. Filepath for GCP credentials JSON file to authorize calls to +# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use +# ## Application Default Credentials, which is preferred. +# # credentials_file = "path/to/my/creds.json" +# +# ## Optional. Number of seconds to wait before attempting to restart the +# ## PubSub subscription receiver after an unexpected error. +# ## If the streaming pull for a PubSub Subscription fails (receiver), +# ## the agent attempts to restart receiving messages after this many seconds. +# # retry_delay_seconds = 5 +# +# ## Optional. Maximum byte length of a message to consume. +# ## Larger messages are dropped with an error. If less than 0 or unspecified, +# ## treated as no limit. +# # max_message_len = 1000000 +# +# ## Optional. Maximum messages to read from PubSub that have not been written +# ## to an output. Defaults to 1000. +# ## For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message contains 10 metrics and the output +# ## metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. 
+# # max_undelivered_messages = 1000 +# +# ## The following are optional Subscription ReceiveSettings in PubSub. +# ## Read more about these values: +# ## https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings +# +# ## Optional. Maximum number of seconds for which a PubSub subscription +# ## should auto-extend the PubSub ACK deadline for each message. If less than +# ## 0, auto-extension is disabled. +# # max_extension = 0 +# +# ## Optional. Maximum number of unprocessed messages in PubSub +# ## (unacknowledged but not yet expired in PubSub). +# ## A value of 0 is treated as the default PubSub value. +# ## Negative values will be treated as unlimited. +# # max_outstanding_messages = 0 +# +# ## Optional. Maximum size in bytes of unprocessed messages in PubSub +# ## (unacknowledged but not yet expired in PubSub). +# ## A value of 0 is treated as the default PubSub value. +# ## Negative values will be treated as unlimited. +# # max_outstanding_bytes = 0 +# +# ## Optional. Max number of goroutines a PubSub Subscription receiver can spawn +# ## to pull messages from PubSub concurrently. This limit applies to each +# ## subscription separately and is treated as the PubSub default if less than +# ## 1. Note this setting does not limit the number of messages that can be +# ## processed concurrently (use "max_outstanding_messages" instead). +# # max_receiver_go_routines = 0 +# +# ## Optional. If true, Telegraf will attempt to base64 decode the +# ## PubSub message data before parsing +# # base64_data = false + + +# # Google Cloud Pub/Sub Push HTTP listener +# [[inputs.cloud_pubsub_push]] +# ## Address and port to host HTTP listener on +# service_address = ":8080" +# +# ## Application secret to verify messages originate from Cloud Pub/Sub +# # token = "" +# +# ## Path to listen to. +# # path = "/" +# +# ## Maximum duration before timing out read of the request +# # read_timeout = "10s" +# ## Maximum duration before timing out write of the response. 
This should be set to a value
+# ## large enough that you can send at least 'metric_batch_size' number of messages within the
+# ## duration.
+# # write_timeout = "10s"
+#
+# ## Maximum allowed http request body size in bytes.
+# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
+# # max_body_size = "500MB"
+#
+# ## Whether to add the pubsub metadata, such as message attributes and subscription as a tag.
+# # add_meta = false
+#
+# ## Optional. Maximum messages to read from PubSub that have not been written
+# ## to an output. Defaults to 1000.
+# ## For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message contains 10 metrics and the output
+# ## metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## Set one or more allowed client CA certificate file names to
+# ## enable mutually authenticated TLS connections
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Add service certificate and key
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Ingests files in a directory and then moves them to a target directory.
+# [[inputs.directory_monitor]]
+# ## The directory to monitor and read files from.
+# directory = ""
+# #
+# ## The directory to move finished files to.
+# finished_directory = ""
+# #
+# ## The directory to move files to upon file error.
+# ## If not provided, erroring files will stay in the monitored directory.
+# # error_directory = "" +# # +# ## The amount of time a file is allowed to sit in the directory before it is picked up. +# ## This time can generally be low but if you choose to have a very large file written to the directory and it's potentially slow, +# ## set this higher so that the plugin will wait until the file is fully copied to the directory. +# # directory_duration_threshold = "50ms" +# # +# ## A list of the only file names to monitor, if necessary. Supports regex. If left blank, all files are ingested. +# # files_to_monitor = ["^.*\.csv"] +# # +# ## A list of files to ignore, if necessary. Supports regex. +# # files_to_ignore = [".DS_Store"] +# # +# ## Maximum lines of the file to process that have not yet be written by the +# ## output. For best throughput set to the size of the output's metric_buffer_limit. +# ## Warning: setting this number higher than the output's metric_buffer_limit can cause dropped metrics. +# # max_buffered_metrics = 10000 +# # +# ## The maximum amount of file paths to queue up for processing at once, before waiting until files are processed to find more files. +# ## Lowering this value will result in *slightly* less memory use, with a potential sacrifice in speed efficiency, if absolutely necessary. +# # file_queue_size = 100000 +# # +# ## The dataformat to be read from the files. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# ## NOTE: We currently only support parsing newline-delimited JSON. 
See the format here: https://github.com/ndjson/ndjson-spec +# data_format = "influx" + + +# # Read logging output from the Docker engine +# [[inputs.docker_log]] +# ## Docker Endpoint +# ## To use TCP, set endpoint = "tcp://[ip]:[port]" +# ## To use environment variables (ie, docker-machine), set endpoint = "ENV" +# # endpoint = "unix:///var/run/docker.sock" +# +# ## When true, container logs are read from the beginning; otherwise +# ## reading begins at the end of the log. +# # from_beginning = false +# +# ## Timeout for Docker API calls. +# # timeout = "5s" +# +# ## Containers to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all containers +# # container_name_include = [] +# # container_name_exclude = [] +# +# ## Container states to include and exclude. Globs accepted. +# ## When empty only containers in the "running" state will be captured. +# # container_state_include = [] +# # container_state_exclude = [] +# +# ## docker labels to include and exclude as tags. Globs accepted. +# ## Note that an empty array for both will include all labels as tags +# # docker_label_include = [] +# # docker_label_exclude = [] +# +# ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars +# source_tag = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Azure Event Hubs service input plugin +# [[inputs.eventhub_consumer]] +# ## The default behavior is to create a new Event Hub client from environment variables. 
+# ## This requires one of the following sets of environment variables to be set: +# ## +# ## 1) Expected Environment Variables: +# ## - "EVENTHUB_CONNECTION_STRING" +# ## +# ## 2) Expected Environment Variables: +# ## - "EVENTHUB_NAMESPACE" +# ## - "EVENTHUB_NAME" +# ## - "EVENTHUB_KEY_NAME" +# ## - "EVENTHUB_KEY_VALUE" +# +# ## 3) Expected Environment Variables: +# ## - "EVENTHUB_NAMESPACE" +# ## - "EVENTHUB_NAME" +# ## - "AZURE_TENANT_ID" +# ## - "AZURE_CLIENT_ID" +# ## - "AZURE_CLIENT_SECRET" +# +# ## Uncommenting the option below will create an Event Hub client based solely on the connection string. +# ## This can either be the associated environment variable or hard coded directly. +# ## If this option is uncommented, environment variables will be ignored. +# ## Connection string should contain EventHubName (EntityPath) +# # connection_string = "" +# +# ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister +# # persistence_dir = "" +# +# ## Change the default consumer group +# # consumer_group = "" +# +# ## By default the event hub receives all messages present on the broker, alternative modes can be set below. +# ## The timestamp should be in https://github.com/toml-lang/toml#offset-date-time format (RFC 3339). +# ## The 3 options below only apply if no valid persister is read from memory or file (e.g. first run). +# # from_timestamp = +# # latest = true +# +# ## Set a custom prefetch count for the receiver(s) +# # prefetch_count = 1000 +# +# ## Add an epoch to the receiver(s) +# # epoch = 0 +# +# ## Change to set a custom user agent, "telegraf" is used by default +# # user_agent = "telegraf" +# +# ## To consume from a specific partition, set the partition_ids option. +# ## An empty array will result in receiving from all partitions. 
+# # partition_ids = ["0","1"] +# +# ## Max undelivered messages +# # max_undelivered_messages = 1000 +# +# ## Set either option below to true to use a system property as timestamp. +# ## You have the choice between EnqueuedTime and IoTHubEnqueuedTime. +# ## It is recommended to use this setting when the data itself has no timestamp. +# # enqueued_time_as_ts = true +# # iot_hub_enqueued_time_as_ts = true +# +# ## Tags or fields to create from keys present in the application property bag. +# ## These could for example be set by message enrichments in Azure IoT Hub. +# # application_property_tags = [] +# # application_property_fields = [] +# +# ## Tag or field name to use for metadata +# ## By default all metadata is disabled +# # sequence_number_field = "SequenceNumber" +# # enqueued_time_field = "EnqueuedTime" +# # offset_field = "Offset" +# # partition_id_tag = "PartitionID" +# # partition_key_tag = "PartitionKey" +# # iot_hub_device_connection_id_tag = "IoTHubDeviceConnectionID" +# # iot_hub_auth_generation_id_tag = "IoTHubAuthGenerationID" +# # iot_hub_connection_auth_method_tag = "IoTHubConnectionAuthMethod" +# # iot_hub_connection_module_id_tag = "IoTHubConnectionModuleID" +# # iot_hub_enqueued_time_field = "IoTHubEnqueuedTime" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Run executable as long-running input plugin +# [[inputs.execd]] +# ## Program to run as daemon +# command = ["telegraf-smartctl", "-d", "/dev/sda"] +# +# ## Define how the process is signaled on each collection interval. +# ## Valid values are: +# ## "none" : Do not signal anything. +# ## The process must output metrics by itself. +# ## "STDIN" : Send a newline on STDIN. +# ## "SIGHUP" : Send a HUP signal. Not available on Windows. +# ## "SIGUSR1" : Send a USR1 signal. 
Not available on Windows. +# ## "SIGUSR2" : Send a USR2 signal. Not available on Windows. +# signal = "none" +# +# ## Delay before the process is restarted after an unexpected termination +# restart_delay = "10s" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # gNMI telemetry input plugin +# [[inputs.gnmi]] +# ## Address and port of the gNMI GRPC server +# addresses = ["10.49.234.114:57777"] +# +# ## define credentials +# username = "cisco" +# password = "cisco" +# +# ## gNMI encoding requested (one of: "proto", "json", "json_ietf", "bytes") +# # encoding = "proto" +# +# ## redial in case of failures after +# redial = "10s" +# +# ## enable client-side TLS and define CA to authenticate the device +# # enable_tls = true +# # tls_ca = "/etc/telegraf/ca.pem" +# # insecure_skip_verify = true +# +# ## define client-side TLS certificate & key to authenticate to the device +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## gNMI subscription prefix (optional, can usually be left empty) +# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths +# # origin = "" +# # prefix = "" +# # target = "" +# +# ## Define additional aliases to map telemetry encoding paths to simple measurement names +# #[inputs.gnmi.aliases] +# # ifcounters = "openconfig:/interfaces/interface/state/counters" +# +# [[inputs.gnmi.subscription]] +# ## Name of the measurement that will be emitted +# name = "ifcounters" +# +# ## Origin and path of the subscription +# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths +# ## +# ## origin usually refers to a (YANG) data model implemented by the device +# ## and path to a specific substructure inside it that should be 
subscribed to (similar to an XPath) +# ## YANG models can be found e.g. here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr +# origin = "openconfig-interfaces" +# path = "/interfaces/interface/state/counters" +# +# # Subscription mode (one of: "target_defined", "sample", "on_change") and interval +# subscription_mode = "sample" +# sample_interval = "10s" +# +# ## Suppress redundant transmissions when measured values are unchanged +# # suppress_redundant = false +# +# ## If suppression is enabled, send updates at least every X seconds anyway +# # heartbeat_interval = "60s" + + +# # Accept metrics over InfluxDB 1.x HTTP API +# [[inputs.http_listener]] +# ## Address and port to host InfluxDB listener on +# service_address = ":8186" +# +# ## maximum duration before timing out read of the request +# read_timeout = "10s" +# ## maximum duration before timing out write of the response +# write_timeout = "10s" +# +# ## Maximum allowed HTTP request body size in bytes. +# ## 0 means to use the default of 32MiB. +# max_body_size = "32MiB" +# +# ## Optional tag name used to store the database. +# ## If the write has a database in the query string then it will be kept in this tag name. +# ## This tag can be used in downstream outputs. +# ## The default value of nothing means it will be off and the database will not be recorded. +# # database_tag = "" +# +# ## If set the retention policy specified in the write query will be added as +# ## the value of this tag name. +# # retention_policy_tag = "" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# tls_cert = "/etc/telegraf/cert.pem" +# tls_key = "/etc/telegraf/key.pem" +# +# ## Optional username and password to accept for HTTP basic authentication. +# ## You probably want to make sure you have TLS configured above for this. 
+# # basic_username = "foobar" +# # basic_password = "barfoo" + + +# # Generic HTTP write listener +# [[inputs.http_listener_v2]] +# ## Address and port to host HTTP listener on +# service_address = ":8080" +# +# ## Path to listen to. +# ## This option is deprecated and only available for backward-compatibility. Please use paths instead. +# # path = "" +# +# ## Paths to listen to. +# # paths = ["/telegraf"] +# +# ## Save path as http_listener_v2_path tag if set to true +# # path_tag = false +# +# ## HTTP methods to accept. +# # methods = ["POST", "PUT"] +# +# ## maximum duration before timing out read of the request +# # read_timeout = "10s" +# ## maximum duration before timing out write of the response +# # write_timeout = "10s" +# +# ## Maximum allowed http request body size in bytes. +# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) +# # max_body_size = "500MB" +# +# ## Part of the request to consume. Available options are "body" and +# ## "query". +# # data_source = "body" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Optional username and password to accept for HTTP basic authentication. +# ## You probably want to make sure you have TLS configured above for this. +# # basic_username = "foobar" +# # basic_password = "barfoo" +# +# ## Optional setting to map http headers into tags +# ## If the http header is not present on the request, no corresponding tag will be added +# ## If multiple instances of the http header are present, only the first value will be used +# # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} +# +# ## Data format to consume. 
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Accept metrics over InfluxDB 1.x HTTP API +# [[inputs.influxdb_listener]] +# ## Address and port to host InfluxDB listener on +# service_address = ":8186" +# +# ## maximum duration before timing out read of the request +# read_timeout = "10s" +# ## maximum duration before timing out write of the response +# write_timeout = "10s" +# +# ## Maximum allowed HTTP request body size in bytes. +# ## 0 means to use the default of 32MiB. +# max_body_size = "32MiB" +# +# ## Optional tag name used to store the database. +# ## If the write has a database in the query string then it will be kept in this tag name. +# ## This tag can be used in downstream outputs. +# ## The default value of nothing means it will be off and the database will not be recorded. +# # database_tag = "" +# +# ## If set the retention policy specified in the write query will be added as +# ## the value of this tag name. +# # retention_policy_tag = "" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# tls_cert = "/etc/telegraf/cert.pem" +# tls_key = "/etc/telegraf/key.pem" +# +# ## Optional username and password to accept for HTTP basic authentication. +# ## You probably want to make sure you have TLS configured above for this. +# # basic_username = "foobar" +# # basic_password = "barfoo" + + +# # Accept metrics over InfluxDB 2.x HTTP API +# [[inputs.influxdb_v2_listener]] +# ## Address and port to host InfluxDB listener on +# ## (Double check the port. Could be 9999 if using OSS Beta) +# service_address = ":8086" +# +# ## Maximum allowed HTTP request body size in bytes. +# ## 0 means to use the default of 32MiB. 
+# # max_body_size = "32MiB" +# +# ## Optional tag to determine the bucket. +# ## If the write has a bucket in the query string then it will be kept in this tag name. +# ## This tag can be used in downstream outputs. +# ## The default value of nothing means it will be off and the database will not be recorded. +# # bucket_tag = "" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Optional token to accept for HTTP authentication. +# ## You probably want to make sure you have TLS configured above for this. +# # token = "some-long-shared-secret-token" + + +# # Read JTI OpenConfig Telemetry from listed sensors +# [[inputs.jti_openconfig_telemetry]] +# ## List of device addresses to collect telemetry from +# servers = ["localhost:1883"] +# +# ## Authentication details. Username and password are must if device expects +# ## authentication. Client ID must be unique when connecting from multiple instances +# ## of telegraf to the same device +# username = "user" +# password = "pass" +# client_id = "telegraf" +# +# ## Frequency to get data +# sample_frequency = "1000ms" +# +# ## Sensors to subscribe for +# ## A identifier for each sensor can be provided in path by separating with space +# ## Else sensor path will be used as identifier +# ## When identifier is used, we can provide a list of space separated sensors. +# ## A single subscription will be created with all these sensors and data will +# ## be saved to measurement with this identifier name +# sensors = [ +# "/interfaces/", +# "collection /components/ /lldp", +# ] +# +# ## We allow specifying sensor group level reporting rate. To do this, specify the +# ## reporting rate in Duration at the beginning of sensor paths / collection +# ## name. 
For entries without reporting rate, we use configured sample frequency +# sensors = [ +# "1000ms customReporting /interfaces /lldp", +# "2000ms collection /components", +# "/interfaces", +# ] +# +# ## Optional TLS Config +# # enable_tls = true +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms. +# ## Failed streams/calls will not be retried if 0 is provided +# retry_delay = "1000ms" +# +# ## To treat all string values as tags, set this to true +# str_as_tags = false + + +# # Read metrics from Kafka topics +# [[inputs.kafka_consumer]] +# ## Kafka brokers. +# brokers = ["localhost:9092"] +# +# ## Topics to consume. +# topics = ["telegraf"] +# +# ## When set this tag will be added to all metrics with the topic as the value. +# # topic_tag = "" +# +# ## Optional Client id +# # client_id = "Telegraf" +# +# ## Set the minimal supported Kafka version. Setting this enables the use of new +# ## Kafka features and APIs. Must be 0.10.2.0 or greater. +# ## ex: version = "1.1.0" +# # version = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## SASL authentication credentials. 
These settings should typically be used +# ## with TLS encryption enabled +# # sasl_username = "kafka" +# # sasl_password = "secret" +# +# ## Optional SASL: +# ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI +# ## (defaults to PLAIN) +# # sasl_mechanism = "" +# +# ## used if sasl_mechanism is GSSAPI (experimental) +# # sasl_gssapi_service_name = "" +# # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH +# # sasl_gssapi_auth_type = "KRB5_USER_AUTH" +# # sasl_gssapi_kerberos_config_path = "/" +# # sasl_gssapi_realm = "realm" +# # sasl_gssapi_key_tab_path = "" +# # sasl_gssapi_disable_pafxfast = false +# +# ## used if sasl_mechanism is OAUTHBEARER (experimental) +# # sasl_access_token = "" +# +# ## SASL protocol version. When connecting to Azure EventHub set to 0. +# # sasl_version = 1 +# +# ## Name of the consumer group. +# # consumer_group = "telegraf_metrics_consumers" +# +# ## Compression codec represents the various compression codecs recognized by +# ## Kafka in messages. +# ## 0 : None +# ## 1 : Gzip +# ## 2 : Snappy +# ## 3 : LZ4 +# ## 4 : ZSTD +# # compression_codec = 0 +# +# ## Initial offset position; one of "oldest" or "newest". +# # offset = "oldest" +# +# ## Consumer group partition assignment strategy; one of "range", "roundrobin" or "sticky". +# # balance_strategy = "range" +# +# ## Maximum length of a message to consume, in bytes (default 0/unlimited); +# ## larger messages are dropped +# max_message_len = 1000000 +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. 
+# # max_undelivered_messages = 1000 +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read metrics from Kafka topic(s) +# [[inputs.kafka_consumer_legacy]] +# ## topic(s) to consume +# topics = ["telegraf"] +# +# ## an array of Zookeeper connection strings +# zookeeper_peers = ["localhost:2181"] +# +# ## Zookeeper Chroot +# zookeeper_chroot = "" +# +# ## the name of the consumer group +# consumer_group = "telegraf_metrics_consumers" +# +# ## Offset (must be either "oldest" or "newest") +# offset = "oldest" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Maximum length of a message to consume, in bytes (default 0/unlimited); +# ## larger messages are dropped +# max_message_len = 65536 + + +# # Configuration for the AWS Kinesis input. +# [[inputs.kinesis_consumer]] +# ## Amazon REGION of kinesis endpoint. 
+# region = "ap-southeast-2" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# # access_key = "" +# # secret_key = "" +# # token = "" +# # role_arn = "" +# # web_identity_token_file = "" +# # role_session_name = "" +# # profile = "" +# # shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Kinesis StreamName must exist prior to starting telegraf. +# streamname = "StreamName" +# +# ## Shard iterator type (only 'TRIM_HORIZON' and 'LATEST' currently supported) +# # shard_iterator_type = "TRIM_HORIZON" +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Data format to consume. 
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## +# ## The content encoding of the data from kinesis +# ## If you are processing a cloudwatch logs kinesis stream then set this to "gzip" +# ## as AWS compresses cloudwatch log data before it is sent to kinesis (aws +# ## also base64 encodes the zip byte data before pushing to the stream. The base64 decoding +# ## is done automatically by the golang sdk, as data is read from kinesis) +# ## +# # content_encoding = "identity" +# +# ## Optional +# ## Configuration for a dynamodb checkpoint +# [inputs.kinesis_consumer.checkpoint_dynamodb] +# ## unique name for this consumer +# app_name = "default" +# table_name = "default" + + +# # Listener capable of handling KNX bus messages provided through a KNX-IP Interface. +# [[inputs.knx_listener]] +# ## Type of KNX-IP interface. +# ## Can be either "tunnel" or "router". +# # service_type = "tunnel" +# +# ## Address of the KNX-IP interface. +# service_address = "localhost:3671" +# +# ## Measurement definition(s) +# # [[inputs.knx_listener.measurement]] +# # ## Name of the measurement +# # name = "temperature" +# # ## Datapoint-Type (DPT) of the KNX messages +# # dpt = "9.001" +# # ## List of Group-Addresses (GAs) assigned to the measurement +# # addresses = ["5/5/1"] +# +# # [[inputs.knx_listener.measurement]] +# # name = "illumination" +# # dpt = "9.004" +# # addresses = ["5/5/3"] + + +# # Read metrics off Arista LANZ, via socket +# [[inputs.lanz]] +# ## URL to Arista LANZ endpoint +# servers = [ +# "tcp://127.0.0.1:50001" +# ] + + +# # Stream and parse log file(s). +# [[inputs.logparser]] +# ## Log files to parse. +# ## These accept standard unix glob matching rules, but with the addition of +# ## ** as a "super asterisk". 
ie: +# ## /var/log/**.log -> recursively find all .log files in /var/log +# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log +# ## /var/log/apache.log -> only tail the apache log file +# files = ["/var/log/apache/access.log"] +# +# ## Read files that currently exist from the beginning. Files that are created +# ## while telegraf is running (and that match the "files" globs) will always +# ## be read from the beginning. +# from_beginning = false +# +# ## Method used to watch for file updates. Can be either "inotify" or "poll". +# # watch_method = "inotify" +# +# ## Parse logstash-style "grok" patterns: +# [inputs.logparser.grok] +# ## This is a list of patterns to check the given log file(s) for. +# ## Note that adding patterns here increases processing time. The most +# ## efficient configuration is to have one pattern per logparser. +# ## Other common built-in patterns are: +# ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs) +# ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent) +# patterns = ["%{COMBINED_LOG_FORMAT}"] +# +# ## Name of the outputted measurement name. +# measurement = "apache_access_log" +# +# ## Full path(s) to custom pattern files. +# custom_pattern_files = [] +# +# ## Custom patterns can also be defined here. Put one pattern per line. +# custom_patterns = ''' +# ''' +# +# ## Timezone allows you to provide an override for timestamps that +# ## don't already include an offset +# ## e.g. 04/06/2016 12:41:45 data one two 5.43µs +# ## +# ## Default: "" which renders UTC +# ## Options are as follows: +# ## 1. Local -- interpret based on machine localtime +# ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones +# ## 3. UTC -- or blank/unspecified, will return timestamp in UTC +# # timezone = "Canada/Eastern" +# +# ## When set to "disable", timestamp will not incremented if there is a +# ## duplicate. 
+# # unique_timestamp = "auto" + + +# # Read metrics from MQTT topic(s) +# [[inputs.mqtt_consumer]] +# ## Broker URLs for the MQTT server or cluster. To connect to multiple +# ## clusters or standalone servers, use a seperate plugin instance. +# ## example: servers = ["tcp://localhost:1883"] +# ## servers = ["ssl://localhost:1883"] +# ## servers = ["ws://localhost:1883"] +# servers = ["tcp://127.0.0.1:1883"] +# +# ## Topics that will be subscribed to. +# topics = [ +# "telegraf/host01/cpu", +# "telegraf/+/mem", +# "sensors/#", +# ] +# +# ## The message topic will be stored in a tag specified by this value. If set +# ## to the empty string no topic tag will be created. +# # topic_tag = "topic" +# +# ## QoS policy for messages +# ## 0 = at most once +# ## 1 = at least once +# ## 2 = exactly once +# ## +# ## When using a QoS of 1 or 2, you should enable persistent_session to allow +# ## resuming unacknowledged messages. +# # qos = 0 +# +# ## Connection timeout for initial connection in seconds +# # connection_timeout = "30s" +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Persistent session disables clearing of the client session on connection. +# ## In order for this option to work you must also set client_id to identify +# ## the client. To receive messages that arrived while the client is offline, +# ## also set the qos option to 1 or 2 and don't forget to also set the QoS when +# ## publishing. 
+# # persistent_session = false +# +# ## If unset, a random client ID will be generated. +# # client_id = "" +# +# ## Username and password to connect MQTT server. +# # username = "telegraf" +# # password = "metricsmetricsmetricsmetrics" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read metrics from NATS subject(s) +# [[inputs.nats_consumer]] +# ## urls of NATS servers +# servers = ["nats://localhost:4222"] +# +# ## subject(s) to consume +# subjects = ["telegraf"] +# +# ## name a queue group +# queue_group = "telegraf_consumers" +# +# ## Optional credentials +# # username = "" +# # password = "" +# +# ## Optional NATS 2.0 and NATS NGS compatible user credentials +# # credentials = "/etc/telegraf/nats.creds" +# +# ## Use Transport Layer Security +# # secure = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Sets the limits for pending msgs and bytes for each subscription +# ## These shouldn't need to be adjusted except in very high throughput scenarios +# # pending_message_limit = 65536 +# # pending_bytes_limit = 67108864 +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. 
+# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read NSQ topic for metrics. +# [[inputs.nsq_consumer]] +# ## Server option still works but is deprecated, we just prepend it to the nsqd array. +# # server = "localhost:4150" +# +# ## An array representing the NSQD TCP HTTP Endpoints +# nsqd = ["localhost:4150"] +# +# ## An array representing the NSQLookupd HTTP Endpoints +# nsqlookupd = ["localhost:4161"] +# topic = "telegraf" +# channel = "consumer" +# max_in_flight = 100 +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Data format to consume. 
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Receive OpenTelemetry traces, metrics, and logs over gRPC +# [[inputs.opentelemetry]] +# ## Override the default (0.0.0.0:4317) destination OpenTelemetry gRPC service +# ## address:port +# # service_address = "0.0.0.0:4317" +# +# ## Override the default (5s) new connection timeout +# # timeout = "5s" +# +# ## Override the default (prometheus-v1) metrics schema. +# ## Supports: "prometheus-v1", "prometheus-v2" +# ## For more information about the alternatives, read the Prometheus input +# ## plugin notes. +# # metrics_schema = "prometheus-v1" +# +# ## Optional TLS Config. +# ## For advanced options: https://github.com/influxdata/telegraf/blob/v1.18.3/docs/TLS.md +# ## +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections. +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# ## Add service certificate and key. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" + + +# # Read metrics from one or many pgbouncer servers +# [[inputs.pgbouncer]] +# ## specify address via a url matching: +# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## ?sslmode=[disable|verify-ca|verify-full] +# ## or a simple string: +# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production +# ## +# ## All connection parameters are optional. +# ## +# address = "host=localhost user=pgbouncer sslmode=disable" + + +# # Read metrics from one or many postgresql servers +# [[inputs.postgresql]] +# ## specify address via a url matching: +# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## ?sslmode=[disable|verify-ca|verify-full] +# ## or a simple string: +# ## host=localhost user=pqgotest password=... sslmode=... 
dbname=app_production +# ## +# ## All connection parameters are optional. +# ## +# ## Without the dbname parameter, the driver will default to a database +# ## with the same name as the user. This dbname is just for instantiating a +# ## connection with the server and doesn't restrict the databases we are trying +# ## to grab metrics for. +# ## +# address = "host=localhost user=postgres sslmode=disable" +# ## A custom name for the database that will be used as the "server" tag in the +# ## measurement output. If not specified, a default one generated from +# ## the connection address is used. +# # outputaddress = "db01" +# +# ## connection configuration. +# ## maxlifetime - specify the maximum lifetime of a connection. +# ## default is forever (0s) +# max_lifetime = "0s" +# +# ## A list of databases to explicitly ignore. If not specified, metrics for all +# ## databases are gathered. Do NOT use with the 'databases' option. +# # ignored_databases = ["postgres", "template0", "template1"] +# +# ## A list of databases to pull metrics about. If not specified, metrics for all +# ## databases are gathered. Do NOT use with the 'ignored_databases' option. +# # databases = ["app_production", "testing"] + + +# # Read metrics from one or many postgresql servers +# [[inputs.postgresql_extensible]] +# ## specify address via a url matching: +# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## ?sslmode=[disable|verify-ca|verify-full] +# ## or a simple string: +# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production +# # +# ## All connection parameters are optional. # +# ## Without the dbname parameter, the driver will default to a database +# ## with the same name as the user. This dbname is just for instantiating a +# ## connection with the server and doesn't restrict the databases we are trying +# ## to grab metrics for. +# # +# address = "host=localhost user=postgres sslmode=disable" +# +# ## connection configuration. 
+# ## maxlifetime - specify the maximum lifetime of a connection. +# ## default is forever (0s) +# max_lifetime = "0s" +# +# ## A list of databases to pull metrics about. If not specified, metrics for all +# ## databases are gathered. +# ## databases = ["app_production", "testing"] +# # +# ## A custom name for the database that will be used as the "server" tag in the +# ## measurement output. If not specified, a default one generated from +# ## the connection address is used. +# # outputaddress = "db01" +# # +# ## Define the toml config where the sql queries are stored +# ## New queries can be added, if the withdbname is set to true and there is no +# ## databases defined in the 'databases field', the sql query is ended by a +# ## 'is not null' in order to make the query succeed. +# ## Example : +# ## The sqlquery : "SELECT * FROM pg_stat_database where datname" become +# ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')" +# ## because the databases variable was set to ['postgres', 'pgbench' ] and the +# ## withdbname was true. Be careful that if the withdbname is set to false you +# ## don't have to define the where clause (aka with the dbname) the tagvalue +# ## field is used to define custom tags (separated by commas) +# ## The optional "measurement" value can be used to override the default +# ## output measurement name ("postgresql"). +# ## +# ## The script option can be used to specify the .sql file path. +# ## If script and sqlquery options specified at same time, sqlquery will be used +# ## +# ## the tagvalue field is used to define custom tags (separated by comas). +# ## the query is expected to return columns which match the names of the +# ## defined tags. The values in these columns must be of a string-type, +# ## a number-type or a blob-type. +# ## +# ## The timestamp field is used to override the data points timestamp value. By +# ## default, all rows inserted with current time. 
By setting a timestamp column, +# ## the row will be inserted with that column's value. +# ## +# ## Structure : +# ## [[inputs.postgresql_extensible.query]] +# ## sqlquery string +# ## version string +# ## withdbname boolean +# ## tagvalue string (comma separated) +# ## measurement string +# ## timestamp string +# [[inputs.postgresql_extensible.query]] +# sqlquery="SELECT * FROM pg_stat_database" +# version=901 +# withdbname=false +# tagvalue="" +# measurement="" +# [[inputs.postgresql_extensible.query]] +# sqlquery="SELECT * FROM pg_stat_bgwriter" +# version=901 +# withdbname=false +# tagvalue="postgresql.stats" + + +# # Read metrics from one or many prometheus clients +# [[inputs.prometheus]] +# ## An array of urls to scrape metrics from. +# urls = ["http://localhost:9100/metrics"] +# +# ## Metric version controls the mapping from Prometheus metrics into +# ## Telegraf metrics. When using the prometheus_client output, use the same +# ## value in both plugins to ensure metrics are round-tripped without +# ## modification. +# ## +# ## example: metric_version = 1; +# ## metric_version = 2; recommended version +# # metric_version = 1 +# +# ## Url tag name (tag containing scrapped url. optional, default is "url") +# # url_tag = "url" +# +# ## Whether the timestamp of the scraped metrics will be ignored. +# ## If set to true, the gather time will be used. +# # ignore_timestamp = false +# +# ## An array of Kubernetes services to scrape metrics from. +# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] +# +# ## Kubernetes config file to create client from. +# # kube_config = "/path/to/kubernetes.config" +# +# ## Scrape Kubernetes pods for the following prometheus annotations: +# ## - prometheus.io/scrape: Enable scraping for this pod +# ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to +# ## set this to 'https' & most likely set the tls config. 
+# ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. +# ## - prometheus.io/port: If port is not 9102 use this annotation +# # monitor_kubernetes_pods = true +# ## Get the list of pods to scrape with either the scope of +# ## - cluster: the kubernetes watch api (default, no need to specify) +# ## - node: the local cadvisor api; for scalability. Note that the config node_ip or the environment variable NODE_IP must be set to the host IP. +# # pod_scrape_scope = "cluster" +# ## Only for node scrape scope: node IP of the node that telegraf is running on. +# ## Either this config or the environment variable NODE_IP must be set. +# # node_ip = "10.180.1.1" +# ## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping. +# ## Default is 60 seconds. +# # pod_scrape_interval = 60 +# ## Restricts Kubernetes monitoring to a single namespace +# ## ex: monitor_kubernetes_pods_namespace = "default" +# # monitor_kubernetes_pods_namespace = "" +# # label selector to target pods which have the label +# # kubernetes_label_selector = "env=dev,app=nginx" +# # field selector to target pods +# # eg. To scrape pods on a specific node +# # kubernetes_field_selector = "spec.nodeName=$HOSTNAME" +# +# ## Scrape Services available in Consul Catalog +# # [inputs.prometheus.consul] +# # enabled = true +# # agent = "http://localhost:8500" +# # query_interval = "5m" +# +# # [[inputs.prometheus.consul.query]] +# # name = "a service name" +# # tag = "a service tag" +# # url = 'http://{{if ne .ServiceAddress ""}}{{.ServiceAddress}}{{else}}{{.Address}}{{end}}:{{.ServicePort}}/{{with .ServiceMeta.metrics_path}}{{.}}{{else}}metrics{{end}}' +# # [inputs.prometheus.consul.query.tags] +# # host = "{{.Node}}" +# +# ## Use bearer token for authorization. 
('bearer_token' takes priority) +# # bearer_token = "/path/to/bearer/token" +# ## OR +# # bearer_token_string = "abc_123" +# +# ## HTTP Basic Authentication username and password. ('bearer_token' and +# ## 'bearer_token_string' take priority) +# # username = "" +# # password = "" +# +# ## Specify timeout duration for slower prometheus clients (default is 3s) +# # response_timeout = "3s" +# +# ## Optional TLS Config +# # tls_ca = /path/to/cafile +# # tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Riemann protobuff listener. +# [[inputs.riemann_listener]] +# ## URL to listen on. +# ## Default is "tcp://:5555" +# # service_address = "tcp://:8094" +# # service_address = "tcp://127.0.0.1:http" +# # service_address = "tcp4://:8094" +# # service_address = "tcp6://:8094" +# # service_address = "tcp6://[2001:db8::1]:8094" +# +# ## Maximum number of concurrent connections. +# ## 0 (default) is unlimited. +# # max_connections = 1024 +# ## Read timeout. +# ## 0 (default) is unlimited. +# # read_timeout = "30s" +# ## Optional TLS configuration. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Enables client authentication if set. +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# ## Maximum socket buffer size (in bytes when no unit specified). +# # read_buffer_size = "64KiB" +# ## Period between keep alive probes. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration. +# # keep_alive_period = "5m" + + +# # SFlow V5 Protocol Listener +# [[inputs.sflow]] +# ## Address to listen for sFlow packets. +# ## example: service_address = "udp://:6343" +# ## service_address = "udp4://:6343" +# ## service_address = "udp6://:6343" +# service_address = "udp://:6343" +# +# ## Set the size of the operating system's receive buffer. 
+# ## example: read_buffer_size = "64KiB" +# # read_buffer_size = "" + + +# # Receive SNMP traps +# [[inputs.snmp_trap]] +# ## Transport, local address, and port to listen on. Transport must +# ## be "udp://". Omit local address to listen on all interfaces. +# ## example: "udp://127.0.0.1:1234" +# ## +# ## Special permissions may be required to listen on a port less than +# ## 1024. See README.md for details +# ## +# # service_address = "udp://:162" +# ## +# ## Path to mib files +# # path = ["/usr/share/snmp/mibs"] +# ## +# ## Timeout running snmptranslate command +# # timeout = "5s" +# ## Snmp version, defaults to 2c +# # version = "2c" +# ## SNMPv3 authentication and encryption options. +# ## +# ## Security Name. +# # sec_name = "myuser" +# ## Authentication protocol; one of "MD5", "SHA" or "". +# # auth_protocol = "MD5" +# ## Authentication password. +# # auth_password = "pass" +# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". +# # sec_level = "authNoPriv" +# ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C" or "". +# # priv_protocol = "" +# ## Privacy password used for encrypted messages. +# # priv_password = "" + + +# # Generic socket listener capable of handling multiple socket types. +# [[inputs.socket_listener]] +# ## URL to listen on +# # service_address = "tcp://:8094" +# # service_address = "tcp://127.0.0.1:http" +# # service_address = "tcp4://:8094" +# # service_address = "tcp6://:8094" +# # service_address = "tcp6://[2001:db8::1]:8094" +# # service_address = "udp://:8094" +# # service_address = "udp4://:8094" +# # service_address = "udp6://:8094" +# # service_address = "unix:///tmp/telegraf.sock" +# # service_address = "unixgram:///tmp/telegraf.sock" +# +# ## Change the file mode bits on unix sockets. 
These permissions may not be +# ## respected by some platforms, to safely restrict write permissions it is best +# ## to place the socket into a directory that has previously been created +# ## with the desired permissions. +# ## ex: socket_mode = "777" +# # socket_mode = "" +# +# ## Maximum number of concurrent connections. +# ## Only applies to stream sockets (e.g. TCP). +# ## 0 (default) is unlimited. +# # max_connections = 1024 +# +# ## Read timeout. +# ## Only applies to stream sockets (e.g. TCP). +# ## 0 (default) is unlimited. +# # read_timeout = "30s" +# +# ## Optional TLS configuration. +# ## Only applies to stream sockets (e.g. TCP). +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Enables client authentication if set. +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Maximum socket buffer size (in bytes when no unit specified). +# ## For stream sockets, once the buffer fills up, the sender will start backing up. +# ## For datagram sockets, once the buffer fills up, metrics will start dropping. +# ## Defaults to the OS default. +# # read_buffer_size = "64KiB" +# +# ## Period between keep alive probes. +# ## Only applies to TCP sockets. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration. +# # keep_alive_period = "5m" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# # data_format = "influx" +# +# ## Content encoding for message payloads, can be set to "gzip" to or +# ## "identity" to apply no encoding. +# # content_encoding = "identity" + + +# # Read metrics from SQL queries +# [[inputs.sql]] +# ## Database Driver +# ## See https://github.com/influxdata/telegraf/blob/master/docs/SQL_DRIVERS_INPUT.md for +# ## a list of supported drivers. 
+# driver = "mysql" +# +# ## Data source name for connecting +# ## The syntax and supported options depends on selected driver. +# dsn = "username:password@mysqlserver:3307/dbname?param=value" +# +# ## Timeout for any operation +# ## Note that the timeout for queries is per query not per gather. +# # timeout = "5s" +# +# ## Connection time limits +# ## By default the maximum idle time and maximum lifetime of a connection is unlimited, i.e. the connections +# ## will not be closed automatically. If you specify a positive time, the connections will be closed after +# ## idleing or existing for at least that amount of time, respectively. +# # connection_max_idle_time = "0s" +# # connection_max_life_time = "0s" +# +# ## Connection count limits +# ## By default the number of open connections is not limited and the number of maximum idle connections +# ## will be inferred from the number of queries specified. If you specify a positive number for any of the +# ## two options, connections will be closed when reaching the specified limit. The number of idle connections +# ## will be clipped to the maximum number of connections limit if any. +# # connection_max_open = 0 +# # connection_max_idle = auto +# +# [[inputs.sql.query]] +# ## Query to perform on the server +# query="SELECT user,state,latency,score FROM Scoreboard WHERE application > 0" +# ## Alternatively to specifying the query directly you can select a file here containing the SQL query. +# ## Only one of 'query' and 'query_script' can be specified! +# # query_script = "/path/to/sql/script.sql" +# +# ## Name of the measurement +# ## In case both measurement and 'measurement_col' are given, the latter takes precedence. +# # measurement = "sql" +# +# ## Column name containing the name of the measurement +# ## If given, this will take precedence over the 'measurement' setting. In case a query result +# ## does not contain the specified column, we fall-back to the 'measurement' setting. 
+# # measurement_column = "" +# +# ## Column name containing the time of the measurement +# ## If ommited, the time of the query will be used. +# # time_column = "" +# +# ## Format of the time contained in 'time_col' +# ## The time must be 'unix', 'unix_ms', 'unix_us', 'unix_ns', or a golang time format. +# ## See https://golang.org/pkg/time/#Time.Format for details. +# # time_format = "unix" +# +# ## Column names containing tags +# ## An empty include list will reject all columns and an empty exclude list will not exclude any column. +# ## I.e. by default no columns will be returned as tag and the tags are empty. +# # tag_columns_include = [] +# # tag_columns_exclude = [] +# +# ## Column names containing fields (explicit types) +# ## Convert the given columns to the corresponding type. Explicit type conversions take precedence over +# ## the automatic (driver-based) conversion below. +# ## NOTE: Columns should not be specified for multiple types or the resulting type is undefined. +# # field_columns_float = [] +# # field_columns_int = [] +# # field_columns_uint = [] +# # field_columns_bool = [] +# # field_columns_string = [] +# +# ## Column names containing fields (automatic types) +# ## An empty include list is equivalent to '[*]' and all returned columns will be accepted. An empty +# ## exclude list will not exclude any column. I.e. by default all columns will be returned as fields. +# ## NOTE: We rely on the database driver to perform automatic datatype conversion. +# # field_columns_include = [] +# # field_columns_exclude = [] + + +# # Read metrics from Microsoft SQL Server +# [[inputs.sqlserver]] +# ## Specify instances to monitor with a list of connection strings. +# ## All connection parameters are optional. +# ## By default, the host is localhost, listening on default port, TCP 1433. +# ## for Windows, the user is the currently running AD user (SSO). 
+# ## See https://github.com/denisenkom/go-mssqldb for detailed connection +# ## parameters, in particular, tls connections can be created like so: +# ## "encrypt=true;certificate=;hostNameInCertificate=" +# servers = [ +# "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", +# ] +# +# ## Authentication method +# ## valid methods: "connection_string", "AAD" +# # auth_method = "connection_string" +# +# ## "database_type" enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 +# ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. +# ## Possible values for database_type are - "AzureSQLDB" or "AzureSQLManagedInstance" or "SQLServer" +# +# ## Queries enabled by default for database_type = "AzureSQLDB" are - +# ## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties, +# ## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers +# +# # database_type = "AzureSQLDB" +# +# ## A list of queries to include. If not specified, all the above listed queries are used. +# # include_query = [] +# +# ## A list of queries to explicitly ignore. 
+# # exclude_query = [] +# +# ## Queries enabled by default for database_type = "AzureSQLManagedInstance" are - +# ## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats, +# ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers +# +# # database_type = "AzureSQLManagedInstance" +# +# # include_query = [] +# +# # exclude_query = [] +# +# ## Queries enabled by default for database_type = "SQLServer" are - +# ## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks, +# ## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu +# +# database_type = "SQLServer" +# +# include_query = [] +# +# ## SQLServerAvailabilityReplicaStates and SQLServerDatabaseReplicaStates are optional queries and hence excluded here as default +# exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"] +# +# ## Following are old config settings, you may use them only if you are using the earlier flavor of queries, however it is recommended to use +# ## the new mechanism of identifying the database_type there by use it's corresponding queries +# +# ## Optional parameter, setting this to 2 will use a new version +# ## of the collection queries that break compatibility with the original +# ## dashboards. 
+# ## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB
+# # query_version = 2
+#
+# ## If you are using AzureDB, setting this to true will gather resource utilization metrics
+# # azuredb = false
+
+
+# # Statsd UDP/TCP Server
+# [[inputs.statsd]]
+# ## Protocol, must be "tcp", "udp", "udp4" or "udp6" (default=udp)
+# protocol = "udp"
+#
+# ## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
+# max_tcp_connections = 250
+#
+# ## Enable TCP keep alive probes (default=false)
+# tcp_keep_alive = false
+#
+# ## Specifies the keep-alive period for an active network connection.
+# ## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false.
+# ## Defaults to the OS configuration.
+# # tcp_keep_alive_period = "2h"
+#
+# ## Address and port to host UDP listener on
+# service_address = ":8125"
+#
+# ## The following configuration options control when telegraf clears its cache
+# ## of previous values. If set to false, then telegraf will only clear its
+# ## cache when the daemon is restarted. 
+# ## Reset gauges every interval (default=true) +# delete_gauges = true +# ## Reset counters every interval (default=true) +# delete_counters = true +# ## Reset sets every interval (default=true) +# delete_sets = true +# ## Reset timings & histograms every interval (default=true) +# delete_timings = true +# +# ## Percentiles to calculate for timing & histogram stats +# percentiles = [50.0, 90.0, 99.0, 99.9, 99.95, 100.0] +# +# ## separator to use between elements of a statsd metric +# metric_separator = "_" +# +# ## Parses tags in the datadog statsd format +# ## http://docs.datadoghq.com/guides/dogstatsd/ +# parse_data_dog_tags = false +# +# ## Parses datadog extensions to the statsd format +# datadog_extensions = false +# +# ## Parses distributions metric as specified in the datadog statsd format +# ## https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition +# datadog_distributions = false +# +# ## Statsd data translation templates, more info can be read here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md +# # templates = [ +# # "cpu.* measurement*" +# # ] +# +# ## Number of UDP messages allowed to queue up, once filled, +# ## the statsd server will start dropping packets +# allowed_pending_messages = 10000 +# +# ## Number of timing/histogram values to track per-measurement in the +# ## calculation of percentiles. Raising this limit increases the accuracy +# ## of percentiles but also increases the memory usage and cpu time. +# percentile_limit = 1000 +# +# ## Max duration (TTL) for each metric to stay cached/reported without being updated. +# #max_ttl = "1000h" + + +# # Suricata stats and alerts plugin +# [[inputs.suricata]] +# ## Data sink for Suricata stats and alerts logs +# # This is expected to be a filename of a +# # unix socket to be created for listening. +# source = "/var/run/suricata-stats.sock" +# +# # Delimiter for flattening field keys, e.g. 
subitem "alert" of "detect" +# # becomes "detect_alert" when delimiter is "_". +# delimiter = "_" +# +# ## Detect alert logs +# # alerts = false + + +# # Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587 +# [[inputs.syslog]] +# ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514 +# ## Protocol, address and port to host the syslog receiver. +# ## If no host is specified, then localhost is used. +# ## If no port is specified, 6514 is used (RFC5425#section-4.1). +# server = "tcp://:6514" +# +# ## TLS Config +# # tls_allowed_cacerts = ["/etc/telegraf/ca.pem"] +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Period between keep alive probes. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration. +# ## Only applies to stream sockets (e.g. TCP). +# # keep_alive_period = "5m" +# +# ## Maximum number of concurrent connections (default = 0). +# ## 0 means unlimited. +# ## Only applies to stream sockets (e.g. TCP). +# # max_connections = 1024 +# +# ## Read timeout is the maximum time allowed for reading a single message (default = 5s). +# ## 0 means unlimited. +# # read_timeout = "5s" +# +# ## The framing technique with which it is expected that messages are transported (default = "octet-counting"). +# ## Whether the messages come using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), +# ## or the non-transparent framing technique (RFC6587#section-3.4.2). +# ## Must be one of "octet-counting", "non-transparent". +# # framing = "octet-counting" +# +# ## The trailer to be expected in case of non-transparent framing (default = "LF"). +# ## Must be one of "LF", or "NUL". +# # trailer = "LF" +# +# ## Whether to parse in best effort mode or not (default = false). +# ## By default best effort parsing is off. +# # best_effort = false +# +# ## The RFC standard to use for message parsing +# ## By default RFC5424 is used. 
RFC3164 only supports UDP transport (no streaming support) +# ## Must be one of "RFC5424", or "RFC3164". +# # syslog_standard = "RFC5424" +# +# ## Character to prepend to SD-PARAMs (default = "_"). +# ## A syslog message can contain multiple parameters and multiple identifiers within structured data section. +# ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"] +# ## For each combination a field is created. +# ## Its name is created concatenating identifier, sdparam_separator, and parameter name. +# # sdparam_separator = "_" + + +# # Parse the new lines appended to a file +# [[inputs.tail]] +# ## File names or a pattern to tail. +# ## These accept standard unix glob matching rules, but with the addition of +# ## ** as a "super asterisk". ie: +# ## "/var/log/**.log" -> recursively find all .log files in /var/log +# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log +# ## "/var/log/apache.log" -> just tail the apache log file +# ## "/var/log/log[!1-2]* -> tail files without 1-2 +# ## "/var/log/log[^1-2]* -> identical behavior as above +# ## See https://github.com/gobwas/glob for more examples +# ## +# files = ["/var/mymetrics.out"] +# +# ## Read file from beginning. +# # from_beginning = false +# +# ## Whether file is a named pipe +# # pipe = false +# +# ## Method used to watch for file updates. Can be either "inotify" or "poll". +# # watch_method = "inotify" +# +# ## Maximum lines of the file to process that have not yet be written by the +# ## output. For best throughput set based on the number of metrics on each +# ## line and the size of the output's metric_batch_size. +# # max_undelivered_lines = 1000 +# +# ## Character encoding to use when interpreting the file contents. Invalid +# ## characters are replaced using the unicode replacement character. When set +# ## to the empty string the data is not decoded to text. 
+# ## ex: character_encoding = "utf-8" +# ## character_encoding = "utf-16le" +# ## character_encoding = "utf-16be" +# ## character_encoding = "" +# # character_encoding = "" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Set the tag that will contain the path of the tailed file. If you don't want this tag, set it to an empty string. +# # path_tag = "path" +# +# ## multiline parser/codec +# ## https://www.elastic.co/guide/en/logstash/2.4/plugins-filters-multiline.html +# #[inputs.tail.multiline] +# ## The pattern should be a regexp which matches what you believe to be an +# ## indicator that the field is part of an event consisting of multiple lines of log data. +# #pattern = "^\s" +# +# ## This field must be either "previous" or "next". +# ## If a line matches the pattern, "previous" indicates that it belongs to the previous line, +# ## whereas "next" indicates that the line belongs to the next one. +# #match_which_line = "previous" +# +# ## The invert_match field can be true or false (defaults to false). +# ## If true, a message not matching the pattern will constitute a match of the multiline +# ## filter and the what will be applied. (vice-versa is also true) +# #invert_match = false +# +# ## After the specified timeout, this plugin sends a multiline event even if no new pattern +# ## is found to start a new event. The default timeout is 5s. 
+# #timeout = 5s
+
+
+# # Generic TCP listener
+# [[inputs.tcp_listener]]
+# # DEPRECATED: the TCP listener plugin has been deprecated in favor of the
+# # socket_listener plugin
+# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
+
+
+# # Generic UDP listener
+# [[inputs.udp_listener]]
+# # DEPRECATED: the UDP listener plugin has been deprecated in favor of the
+# # socket_listener plugin
+# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
+
+
+# # Read metrics from VMware vCenter
+# [[inputs.vsphere]]
+# ## List of vCenter URLs to be monitored. These three lines must be uncommented
+# ## and edited for the plugin to work.
+# vcenters = [ "https://vcenter.local/sdk" ]
+# username = "user@corp.local"
+# password = "secret"
+#
+# ## VMs
+# ## Typical VM metrics (if omitted or empty, all metrics are collected)
+# # vm_include = [ "/*/vm/**"] # Inventory path to VMs to collect (by default all are collected)
+# # vm_exclude = [] # Inventory paths to exclude
+# vm_metric_include = [
+# "cpu.demand.average",
+# "cpu.idle.summation",
+# "cpu.latency.average",
+# "cpu.readiness.average",
+# "cpu.ready.summation",
+# "cpu.run.summation",
+# "cpu.usagemhz.average",
+# "cpu.used.summation",
+# "cpu.wait.summation",
+# "mem.active.average",
+# "mem.granted.average",
+# "mem.latency.average",
+# "mem.swapin.average",
+# "mem.swapinRate.average",
+# "mem.swapout.average",
+# "mem.swapoutRate.average",
+# "mem.usage.average",
+# "mem.vmmemctl.average",
+# "net.bytesRx.average",
+# "net.bytesTx.average",
+# "net.droppedRx.summation",
+# "net.droppedTx.summation",
+# "net.usage.average",
+# "power.power.average",
+# "virtualDisk.numberReadAveraged.average",
+# "virtualDisk.numberWriteAveraged.average",
+# "virtualDisk.read.average",
+# "virtualDisk.readOIO.latest",
+# "virtualDisk.throughput.usage.average",
+# "virtualDisk.totalReadLatency.average",
+# "virtualDisk.totalWriteLatency.average",
+# 
"virtualDisk.write.average", +# "virtualDisk.writeOIO.latest", +# "sys.uptime.latest", +# ] +# # vm_metric_exclude = [] ## Nothing is excluded by default +# # vm_instances = true ## true by default +# +# ## Hosts +# ## Typical host metrics (if omitted or empty, all metrics are collected) +# # host_include = [ "/*/host/**"] # Inventory path to hosts to collect (by default all are collected) +# # host_exclude [] # Inventory paths to exclude +# host_metric_include = [ +# "cpu.coreUtilization.average", +# "cpu.costop.summation", +# "cpu.demand.average", +# "cpu.idle.summation", +# "cpu.latency.average", +# "cpu.readiness.average", +# "cpu.ready.summation", +# "cpu.swapwait.summation", +# "cpu.usage.average", +# "cpu.usagemhz.average", +# "cpu.used.summation", +# "cpu.utilization.average", +# "cpu.wait.summation", +# "disk.deviceReadLatency.average", +# "disk.deviceWriteLatency.average", +# "disk.kernelReadLatency.average", +# "disk.kernelWriteLatency.average", +# "disk.numberReadAveraged.average", +# "disk.numberWriteAveraged.average", +# "disk.read.average", +# "disk.totalReadLatency.average", +# "disk.totalWriteLatency.average", +# "disk.write.average", +# "mem.active.average", +# "mem.latency.average", +# "mem.state.latest", +# "mem.swapin.average", +# "mem.swapinRate.average", +# "mem.swapout.average", +# "mem.swapoutRate.average", +# "mem.totalCapacity.average", +# "mem.usage.average", +# "mem.vmmemctl.average", +# "net.bytesRx.average", +# "net.bytesTx.average", +# "net.droppedRx.summation", +# "net.droppedTx.summation", +# "net.errorsRx.summation", +# "net.errorsTx.summation", +# "net.usage.average", +# "power.power.average", +# "storageAdapter.numberReadAveraged.average", +# "storageAdapter.numberWriteAveraged.average", +# "storageAdapter.read.average", +# "storageAdapter.write.average", +# "sys.uptime.latest", +# ] +# ## Collect IP addresses? 
Valid values are "ipv4" and "ipv6" +# # ip_addresses = ["ipv6", "ipv4" ] +# +# # host_metric_exclude = [] ## Nothing excluded by default +# # host_instances = true ## true by default +# +# +# ## Clusters +# # cluster_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected) +# # cluster_exclude = [] # Inventory paths to exclude +# # cluster_metric_include = [] ## if omitted or empty, all metrics are collected +# # cluster_metric_exclude = [] ## Nothing excluded by default +# # cluster_instances = false ## false by default +# +# ## Datastores +# # datastore_include = [ "/*/datastore/**"] # Inventory path to datastores to collect (by default all are collected) +# # datastore_exclude = [] # Inventory paths to exclude +# # datastore_metric_include = [] ## if omitted or empty, all metrics are collected +# # datastore_metric_exclude = [] ## Nothing excluded by default +# # datastore_instances = false ## false by default +# +# ## Datacenters +# # datacenter_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected) +# # datacenter_exclude = [] # Inventory paths to exclude +# datacenter_metric_include = [] ## if omitted or empty, all metrics are collected +# datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default. 
+# # datacenter_instances = false ## false by default +# +# ## Plugin Settings +# ## separator character to use for measurement and field names (default: "_") +# # separator = "_" +# +# ## number of objects to retrieve per query for realtime resources (vms and hosts) +# ## set to 64 for vCenter 5.5 and 6.0 (default: 256) +# # max_query_objects = 256 +# +# ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores) +# ## set to 64 for vCenter 5.5 and 6.0 (default: 256) +# # max_query_metrics = 256 +# +# ## number of go routines to use for collection and discovery of objects and metrics +# # collect_concurrency = 1 +# # discover_concurrency = 1 +# +# ## the interval before (re)discovering objects subject to metrics collection (default: 300s) +# # object_discovery_interval = "300s" +# +# ## timeout applies to any of the api request made to vcenter +# # timeout = "60s" +# +# ## When set to true, all samples are sent as integers. This makes the output +# ## data types backwards compatible with Telegraf 1.9 or lower. Normally all +# ## samples from vCenter, with the exception of percentages, are integer +# ## values, but under some conditions, some averaging takes place internally in +# ## the plugin. Setting this flag to "false" will send values as floats to +# ## preserve the full precision when averaging takes place. +# # use_int_samples = true +# +# ## Custom attributes from vCenter can be very useful for queries in order to slice the +# ## metrics along different dimension and for forming ad-hoc relationships. They are disabled +# ## by default, since they can add a considerable amount of tags to the resulting metrics. To +# ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include +# ## to select the attributes you want to include. +# ## By default, since they can add a considerable amount of tags to the resulting metrics. 
To +# ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include +# ## to select the attributes you want to include. +# # custom_attribute_include = [] +# # custom_attribute_exclude = ["*"] +# +# ## The number of vSphere 5 minute metric collection cycles to look back for non-realtime metrics. In +# ## some versions (6.7, 7.0 and possible more), certain metrics, such as cluster metrics, may be reported +# ## with a significant delay (>30min). If this happens, try increasing this number. Please note that increasing +# ## it too much may cause performance issues. +# # metric_lookback = 3 +# +# ## Optional SSL Config +# # ssl_ca = "/path/to/cafile" +# # ssl_cert = "/path/to/certfile" +# # ssl_key = "/path/to/keyfile" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false +# +# ## The Historical Interval value must match EXACTLY the interval in the daily +# # "Interval Duration" found on the VCenter server under Configure > General > Statistics > Statistic intervals +# # historical_interval = "5m" + + +# # A Webhooks Event collector +# [[inputs.webhooks]] +# ## Address and port to host Webhook listener on +# service_address = ":1619" +# +# [inputs.webhooks.filestack] +# path = "/filestack" +# +# [inputs.webhooks.github] +# path = "/github" +# # secret = "" +# +# [inputs.webhooks.mandrill] +# path = "/mandrill" +# +# [inputs.webhooks.rollbar] +# path = "/rollbar" +# +# [inputs.webhooks.papertrail] +# path = "/papertrail" +# +# [inputs.webhooks.particle] +# path = "/particle" + + +# # This plugin implements the Zipkin http server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures. 
+# [[inputs.zipkin]] +# # path = "/api/v1/spans" # URL path for span data +# # port = 9411 # Port on which Telegraf listens + From 1a9b3ad476e3a04761dda233186548b5ca95a7b4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 6 Oct 2021 12:13:49 -0500 Subject: [PATCH 55/81] fix: bump k8s.io/apimachinery from 0.21.1 to 0.22.2 (#9776) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Sebastian Spaink --- go.mod | 8 ++++---- go.sum | 30 +++++++++++++++++++----------- 2 files changed, 23 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index 55de18a59efa0..c6f3138489d28 100644 --- a/go.mod +++ b/go.mod @@ -121,7 +121,7 @@ require ( github.com/google/gofuzz v1.1.0 // indirect github.com/google/uuid v1.2.0 // indirect github.com/googleapis/gax-go/v2 v2.0.5 // indirect - github.com/googleapis/gnostic v0.4.1 // indirect + github.com/googleapis/gnostic v0.5.5 // indirect github.com/gopcua/opcua v0.2.0-rc2.0.20210409063412-baabb9b14fd2 github.com/gorilla/mux v1.7.3 github.com/gorilla/websocket v1.4.2 @@ -315,9 +315,9 @@ require ( gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect gotest.tools v2.2.0+incompatible k8s.io/api v0.20.6 - k8s.io/apimachinery v0.21.1 + k8s.io/apimachinery v0.22.2 k8s.io/client-go v0.20.6 - k8s.io/klog/v2 v2.8.0 // indirect + k8s.io/klog/v2 v2.9.0 // indirect k8s.io/utils v0.0.0-20201110183641-67b214c5f920 // indirect modernc.org/cc/v3 v3.33.5 // indirect modernc.org/ccgo/v3 v3.9.4 // indirect @@ -328,7 +328,7 @@ require ( modernc.org/sqlite v1.10.8 modernc.org/strutil v1.1.0 // indirect modernc.org/token v1.0.0 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.1.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect sigs.k8s.io/yaml v1.2.0 // indirect ) diff --git a/go.sum b/go.sum index fccbfb9b9bdcb..a8fc62a7b3874 100644 --- a/go.sum +++ b/go.sum @@ -586,6 +586,7 @@ 
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 github.com/evanphx/json-patch v0.0.0-20200808040245-162e5629780b/go.mod h1:NAJj0yf/KaRKURN6nyi7A9IZydMivZEm9oQLWNjfKDc= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= github.com/facebookgo/stackerr v0.0.0-20150612192056-c2fcf88613f4 h1:fP04zlkPjAGpsduG7xN3rRkxjAqkJaIQnnkNYYw/pAk= @@ -864,8 +865,10 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.4.0/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= -github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= +github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/gopcua/opcua v0.2.0-rc2.0.20210409063412-baabb9b14fd2 h1:OtFKr0Kwe1oLpMR+uNMh/DPgC5fxAq4xRe6HBv8LDqQ= github.com/gopcua/opcua v0.2.0-rc2.0.20210409063412-baabb9b14fd2/go.mod h1:a6QH4F9XeODklCmWuvaOdL8v9H0d73CEKUHWVZLQyE8= github.com/gophercloud/gophercloud v0.1.0/go.mod 
h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= @@ -1294,14 +1297,16 @@ github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= @@ -1559,6 +1564,7 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/stefanberger/go-pkcs11uri 
v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 h1:WhxRHzgeVGETMlmVfqhRn8RIeeNoPr2Czh33I4Zdccw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ -1858,6 +1864,7 @@ golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -1874,7 +1881,6 @@ golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= @@ -1882,6 +1888,7 @@ golang.org/x/net v0.0.0-20210331212208-0fccb6fa2b5c/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b h1:k+E048sYJHyVnsr1GDrRZWQ32D2C7lWs9JRc0bel53A= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1986,6 +1993,7 @@ golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2017,7 +2025,6 @@ 
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2241,6 +2248,7 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200815001618-f69a88009b70/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -2411,8 +2419,8 @@ k8s.io/apimachinery v0.18.8/go.mod h1:6sQd+iHEqmOtALqOFjSWp2KZ9F0wlU/nWm0ZgsYWMi k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.4/go.mod 
h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= -k8s.io/apimachinery v0.21.1 h1:Q6XuHGlj2xc+hlMCvqyYfbv3H7SRGn2c8NycxJquDVs= -k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= +k8s.io/apimachinery v0.22.2 h1:ejz6y/zNma8clPVfNDLnPbleBo6MpoFy/HBiBqCouVk= +k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= @@ -2436,11 +2444,11 @@ k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= -k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM= +k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= -k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= +k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod 
h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= @@ -2485,8 +2493,8 @@ sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8= -sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= From 06edde61610679ed396ceda2f165e70e39f46d66 Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Wed, 6 Oct 2021 15:12:02 -0600 Subject: [PATCH 56/81] Update changelog (cherry picked from commit ca61e202f9b0bd19833cfa37638d7946972339c6) --- CHANGELOG.md | 25 +++++++++++++++++++++++++ etc/telegraf.conf | 15 --------------- 2 files changed, 25 insertions(+), 15 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2ebccd4849220..7360ff5a687d3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,28 @@ +## v1.20.1 [2021-10-06] + +#### Bugfixes + + - [#9776](https://github.com/influxdata/telegraf/pull/9776) Update k8s.io/apimachinery module from 0.21.1 to 0.22.2 + - [#9864](https://github.com/influxdata/telegraf/pull/9864) Update containerd module to v1.5.7 + - [#9863](https://github.com/influxdata/telegraf/pull/9863) Update consul module to v1.11.0 + - [#9846](https://github.com/influxdata/telegraf/pull/9846) 
`inputs.mongodb` Fix panic due to nil dereference + - [#9850](https://github.com/influxdata/telegraf/pull/9850) `inputs.intel_rdt` Prevent timeout when logging + - [#9848](https://github.com/influxdata/telegraf/pull/9848) `outputs.loki` Update http_headers setting to match sample config + - [#9808](https://github.com/influxdata/telegraf/pull/9808) `inputs.procstat` Add missing tags + - [#9803](https://github.com/influxdata/telegraf/pull/9803) `outputs.mqtt` Add keep alive config option and documentation around issue with eclipse/mosquitto version + - [#9800](https://github.com/influxdata/telegraf/pull/9800) Fix output buffer never completely flushing + - [#9458](https://github.com/influxdata/telegraf/pull/9458) `inputs.couchbase` Fix insecure certificate validation + - [#9797](https://github.com/influxdata/telegraf/pull/9797) `inputs.opentelemetry` Fix error returned to OpenTelemetry client + - [#9789](https://github.com/influxdata/telegraf/pull/9789) Update github.com/testcontainers/testcontainers-go module from 0.11.0 to 0.11.1 + - [#9791](https://github.com/influxdata/telegraf/pull/9791) Update github.com/Azure/go-autorest/autorest/adal module + - [#9678](https://github.com/influxdata/telegraf/pull/9678) Update github.com/Azure/go-autorest/autorest/azure/auth module from 0.5.6 to 0.5.8 + - [#9769](https://github.com/influxdata/telegraf/pull/9769) Update cloud.google.com/go/pubsub module from 1.15.0 to 1.17.0 + - [#9770](https://github.com/influxdata/telegraf/pull/9770) Update github.com/aws/smithy-go module from 1.3.1 to 1.8.0 + +#### Features + + - [#9838](https://github.com/influxdata/telegraf/pull/9838) `inputs.elasticsearch_query` Add custom time/date format field + ## v1.20.0 [2021-09-17] #### Release Notes diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 5564bc23ac8aa..2f2dce2f61df6 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -765,9 +765,6 @@ # ## Endpoints for your graylog instances. 
# servers = ["udp://127.0.0.1:12201"] # -# ## Connection timeout. -# # timeout = "5s" -# # ## The field to use as the GELF short_message, if unset the static string # ## "telegraf" will be used. # ## example: short_message_field = "message" @@ -4801,12 +4798,6 @@ # # ] -# # Read metrics about LVM physical volumes, volume groups, logical volumes. -# [[inputs.lvm]] -# ## Use sudo to run LVM commands -# use_sudo = false - - # # Gathers metrics from the /3.0/reports MailChimp API # [[inputs.mailchimp]] # ## MailChimp API key @@ -5501,12 +5492,6 @@ # ## Password. Required for auth_method = "UserName" # # password = "" # # -# ## Option to select the metric timestamp to use. Valid options are: -# ## "gather" -- uses the time of receiving the data in telegraf -# ## "server" -- uses the timestamp provided by the server -# ## "source" -- uses the timestamp provided by the source -# # timestamp = "gather" -# # # ## Node ID configuration # ## name - field name to use in the output # ## namespace - OPC UA namespace of the node (integer value 0 thru 3) From 6b51697ef01a3faa04dedb3e433f95f8281ec29d Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Thu, 7 Oct 2021 10:19:53 -0600 Subject: [PATCH 57/81] fix: set location for timezone on failing time tests (#9877) Resolves: #9874 --- internal/internal_test.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/internal/internal_test.go b/internal/internal_test.go index 8dae73f562702..24fdb91bb2ebc 100644 --- a/internal/internal_test.go +++ b/internal/internal_test.go @@ -514,6 +514,7 @@ func TestParseTimestamp(t *testing.T) { format: "UnixDate", timestamp: "Mon Jan 2 15:04:05 MST 2006", expected: unixdate("Mon Jan 2 15:04:05 MST 2006"), + location: "Local", }, { @@ -521,6 +522,7 @@ func TestParseTimestamp(t *testing.T) { format: "RubyDate", timestamp: "Mon Jan 02 15:04:05 -0700 2006", expected: rubydate("Mon Jan 02 15:04:05 -0700 2006"), + location: "Local", }, { @@ -528,6 +530,7 @@ func TestParseTimestamp(t *testing.T) { 
format: "RFC822", timestamp: "02 Jan 06 15:04 MST", expected: rfc822("02 Jan 06 15:04 MST"), + location: "Local", }, { @@ -535,6 +538,7 @@ func TestParseTimestamp(t *testing.T) { format: "RFC822Z", timestamp: "02 Jan 06 15:04 -0700", expected: rfc822z("02 Jan 06 15:04 -0700"), + location: "Local", }, { @@ -542,6 +546,7 @@ func TestParseTimestamp(t *testing.T) { format: "RFC850", timestamp: "Monday, 02-Jan-06 15:04:05 MST", expected: rfc850("Monday, 02-Jan-06 15:04:05 MST"), + location: "Local", }, { @@ -549,6 +554,7 @@ func TestParseTimestamp(t *testing.T) { format: "RFC1123", timestamp: "Mon, 02 Jan 2006 15:04:05 MST", expected: rfc1123("Mon, 02 Jan 2006 15:04:05 MST"), + location: "Local", }, { @@ -556,6 +562,7 @@ func TestParseTimestamp(t *testing.T) { format: "RFC1123Z", timestamp: "Mon, 02 Jan 2006 15:04:05 -0700", expected: rfc1123z("Mon, 02 Jan 2006 15:04:05 -0700"), + location: "Local", }, { @@ -563,6 +570,7 @@ func TestParseTimestamp(t *testing.T) { format: "RFC3339Nano", timestamp: "2006-01-02T15:04:05.999999999-07:00", expected: rfc3339nano("2006-01-02T15:04:05.999999999-07:00"), + location: "Local", }, { From 282ec85cd7027d021bf7baa09c6b10fd2a5df134 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Thu, 7 Oct 2021 12:34:14 -0600 Subject: [PATCH 58/81] fix: makefile missing space for i386 tar and rpm (#9887) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 12267c04403bb..143a55d58a757 100644 --- a/Makefile +++ b/Makefile @@ -271,7 +271,7 @@ ppc64le += linux_ppc64le.tar.gz ppc64le.rpm ppc64el.deb .PHONY: ppc64le ppc64le: @ echo $(ppc64le) -i386 += freebsd_i386.tar.gz i386.deb linux_i386.tar.gzi386.rpm +i386 += freebsd_i386.tar.gz i386.deb linux_i386.tar.gz i386.rpm .PHONY: i386 i386: @ echo $(i386) From 15753a6f7fa065248a19d534582fc7b79e0afe12 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Thu, 7 Oct 2021 14:47:59 -0500 Subject: [PATCH 59/81] fix: 
migrate to cloud.google.com/go/monitoring/apiv3/v2 (#9880) --- plugins/inputs/stackdriver/stackdriver.go | 6 +++--- plugins/outputs/stackdriver/stackdriver.go | 4 ++-- plugins/outputs/stackdriver/stackdriver_test.go | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/plugins/inputs/stackdriver/stackdriver.go b/plugins/inputs/stackdriver/stackdriver.go index 885913f91dd1c..cc8b1a40a10a5 100644 --- a/plugins/inputs/stackdriver/stackdriver.go +++ b/plugins/inputs/stackdriver/stackdriver.go @@ -9,7 +9,7 @@ import ( "sync" "time" - monitoring "cloud.google.com/go/monitoring/apiv3" + monitoring "cloud.google.com/go/monitoring/apiv3/v2" googlepbduration "github.com/golang/protobuf/ptypes/duration" googlepbts "github.com/golang/protobuf/ptypes/timestamp" "github.com/influxdata/telegraf" @@ -397,7 +397,7 @@ func (s *Stackdriver) newTimeSeriesConf( StartTime: &googlepbts.Timestamp{Seconds: startTime.Unix()}, } tsReq := &monitoringpb.ListTimeSeriesRequest{ - Name: monitoring.MetricProjectPath(s.Project), + Name: fmt.Sprintf("projects/%s", s.Project), Filter: filter, Interval: interval, } @@ -533,7 +533,7 @@ func (s *Stackdriver) generatetimeSeriesConfs( ret := []*timeSeriesConf{} req := &monitoringpb.ListMetricDescriptorsRequest{ - Name: monitoring.MetricProjectPath(s.Project), + Name: fmt.Sprintf("projects/%s", s.Project), } filters := s.newListMetricDescriptorsFilters() diff --git a/plugins/outputs/stackdriver/stackdriver.go b/plugins/outputs/stackdriver/stackdriver.go index 4d561a27b5007..d4f660ff7c569 100644 --- a/plugins/outputs/stackdriver/stackdriver.go +++ b/plugins/outputs/stackdriver/stackdriver.go @@ -9,7 +9,7 @@ import ( "sort" "strings" - monitoring "cloud.google.com/go/monitoring/apiv3" // Imports the Stackdriver Monitoring client package. + monitoring "cloud.google.com/go/monitoring/apiv3/v2" // Imports the Stackdriver Monitoring client package. 
googlepb "github.com/golang/protobuf/ptypes/timestamp" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" @@ -218,7 +218,7 @@ func (s *Stackdriver) Write(metrics []telegraf.Metric) error { // Prepare time series request. timeSeriesRequest := &monitoringpb.CreateTimeSeriesRequest{ - Name: monitoring.MetricProjectPath(s.Project), + Name: fmt.Sprintf("projects/%s", s.Project), TimeSeries: timeSeries, } diff --git a/plugins/outputs/stackdriver/stackdriver_test.go b/plugins/outputs/stackdriver/stackdriver_test.go index 7ddaa44854620..8af553b374c53 100644 --- a/plugins/outputs/stackdriver/stackdriver_test.go +++ b/plugins/outputs/stackdriver/stackdriver_test.go @@ -11,7 +11,7 @@ import ( "testing" "time" - monitoring "cloud.google.com/go/monitoring/apiv3" + monitoring "cloud.google.com/go/monitoring/apiv3/v2" "github.com/golang/protobuf/proto" emptypb "github.com/golang/protobuf/ptypes/empty" googlepb "github.com/golang/protobuf/ptypes/timestamp" From 128ed8849b16239707a9bedd059029792aceac53 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Thu, 7 Oct 2021 16:35:44 -0400 Subject: [PATCH 60/81] fix: memory leak in influx parser (#9787) --- plugins/parsers/influx/machine.go | 14 +++++++------- plugins/parsers/influx/machine.go.rl | 14 +++++++------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/plugins/parsers/influx/machine.go b/plugins/parsers/influx/machine.go index 2649cdb42bc37..4bbf8c079476b 100644 --- a/plugins/parsers/influx/machine.go +++ b/plugins/parsers/influx/machine.go @@ -3747,13 +3747,6 @@ func (m *streamMachine) Next() error { m.machine.finishMetric = false for { - // Expand the buffer if it is full - if m.machine.pe == len(m.machine.data) { - expanded := make([]byte, 2*len(m.machine.data)) - copy(expanded, m.machine.data) - m.machine.data = expanded - } - err := m.machine.exec() if err != nil { return err @@ -3764,6 +3757,13 @@ func (m *streamMachine) Next() error { break } + // Expand the buffer if it is full + 
if m.machine.pe == len(m.machine.data) { + expanded := make([]byte, 2*len(m.machine.data)) + copy(expanded, m.machine.data) + m.machine.data = expanded + } + n, err := m.reader.Read(m.machine.data[m.machine.pe:]) if n == 0 && err == io.EOF { m.machine.eof = m.machine.pe diff --git a/plugins/parsers/influx/machine.go.rl b/plugins/parsers/influx/machine.go.rl index 29f4307860ea2..d6b5d949e4065 100644 --- a/plugins/parsers/influx/machine.go.rl +++ b/plugins/parsers/influx/machine.go.rl @@ -499,13 +499,6 @@ func (m *streamMachine) Next() error { m.machine.finishMetric = false for { - // Expand the buffer if it is full - if m.machine.pe == len(m.machine.data) { - expanded := make([]byte, 2 * len(m.machine.data)) - copy(expanded, m.machine.data) - m.machine.data = expanded - } - err := m.machine.exec() if err != nil { return err @@ -516,6 +509,13 @@ func (m *streamMachine) Next() error { break } + // Expand the buffer if it is full + if m.machine.pe == len(m.machine.data) { + expanded := make([]byte, 2 * len(m.machine.data)) + copy(expanded, m.machine.data) + m.machine.data = expanded + } + n, err := m.reader.Read(m.machine.data[m.machine.pe:]) if n == 0 && err == io.EOF { m.machine.eof = m.machine.pe From d06b387528f36909515b0d27cf333f6ef60f430d Mon Sep 17 00:00:00 2001 From: etycomputer <57578566+etycomputer@users.noreply.github.com> Date: Fri, 8 Oct 2021 06:38:20 +1000 Subject: [PATCH 61/81] feat: Adds the ability to create and name a tag containing the filename using the directory monitor input plugin (#9860) Co-authored-by: Ehsan Yazdi --- plugins/inputs/directory_monitor/README.md | 6 ++ .../directory_monitor/directory_monitor.go | 17 +++++- .../directory_monitor_test.go | 59 +++++++++++++++++++ plugins/inputs/file/README.md | 5 +- plugins/inputs/file/file.go | 6 +- 5 files changed, 89 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/directory_monitor/README.md b/plugins/inputs/directory_monitor/README.md index 66d9eb51fce79..4e260f44256ed 100644 --- 
a/plugins/inputs/directory_monitor/README.md +++ b/plugins/inputs/directory_monitor/README.md @@ -39,6 +39,12 @@ This plugin is intended to read files that are moved or copied to the monitored ## Lowering this value will result in *slightly* less memory use, with a potential sacrifice in speed efficiency, if absolutely necessary. # file_queue_size = 100000 # + ## Name a tag containing the name of the file the data was parsed from. Leave empty + ## to disable. Cautious when file name variation is high, this can increase the cardinality + ## significantly. Read more about cardinality here: + ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality + # file_tag = "" + # ## The dataformat to be read from the files. ## Each data format has its own unique set of configuration options, read ## more about them here: diff --git a/plugins/inputs/directory_monitor/directory_monitor.go b/plugins/inputs/directory_monitor/directory_monitor.go index a58c039422757..ee1163e7a51b1 100644 --- a/plugins/inputs/directory_monitor/directory_monitor.go +++ b/plugins/inputs/directory_monitor/directory_monitor.go @@ -55,6 +55,12 @@ const sampleConfig = ` ## Lowering this value will result in *slightly* less memory use, with a potential sacrifice in speed efficiency, if absolutely necessary. # file_queue_size = 100000 # + ## Name a tag containing the name of the file the data was parsed from. Leave empty + ## to disable. Cautious when file name variation is high, this can increase the cardinality + ## significantly. Read more about cardinality here: + ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality + # file_tag = "" + # ## The dataformat to be read from the files. 
## Each data format has its own unique set of configuration options, read ## more about them here: @@ -75,6 +81,7 @@ type DirectoryMonitor struct { Directory string `toml:"directory"` FinishedDirectory string `toml:"finished_directory"` ErrorDirectory string `toml:"error_directory"` + FileTag string `toml:"file_tag"` FilesToMonitor []string `toml:"files_to_monitor"` FilesToIgnore []string `toml:"files_to_ignore"` @@ -250,10 +257,10 @@ func (monitor *DirectoryMonitor) ingestFile(filePath string) error { reader = file } - return monitor.parseFile(parser, reader) + return monitor.parseFile(parser, reader, file.Name()) } -func (monitor *DirectoryMonitor) parseFile(parser parsers.Parser, reader io.Reader) error { +func (monitor *DirectoryMonitor) parseFile(parser parsers.Parser, reader io.Reader, fileName string) error { // Read the file line-by-line and parse with the configured parse method. firstLine := true scanner := bufio.NewScanner(reader) @@ -264,6 +271,12 @@ func (monitor *DirectoryMonitor) parseFile(parser parsers.Parser, reader io.Read } firstLine = false + if monitor.FileTag != "" { + for _, m := range metrics { + m.AddTag(monitor.FileTag, filepath.Base(fileName)) + } + } + if err := monitor.sendMetrics(metrics); err != nil { return err } diff --git a/plugins/inputs/directory_monitor/directory_monitor_test.go b/plugins/inputs/directory_monitor/directory_monitor_test.go index 7cda5f2d7b639..3e954adb40320 100644 --- a/plugins/inputs/directory_monitor/directory_monitor_test.go +++ b/plugins/inputs/directory_monitor/directory_monitor_test.go @@ -134,3 +134,62 @@ func TestMultipleJSONFileImports(t *testing.T) { // Verify that we read each JSON line once to a single metric. require.Equal(t, len(acc.Metrics), 5) } + +func TestFileTag(t *testing.T) { + acc := testutil.Accumulator{} + testJSONFile := "test.json" + + // Establish process directory and finished directory. 
+ finishedDirectory, err := os.MkdirTemp("", "finished") + require.NoError(t, err) + processDirectory, err := os.MkdirTemp("", "test") + require.NoError(t, err) + defer os.RemoveAll(processDirectory) + defer os.RemoveAll(finishedDirectory) + + // Init plugin. + r := DirectoryMonitor{ + Directory: processDirectory, + FinishedDirectory: finishedDirectory, + FileTag: "filename", + MaxBufferedMetrics: 1000, + FileQueueSize: 1000, + } + err = r.Init() + require.NoError(t, err) + + parserConfig := parsers.Config{ + DataFormat: "json", + JSONNameKey: "Name", + } + + r.SetParserFunc(func() (parsers.Parser, error) { + return parsers.NewParser(&parserConfig) + }) + + // Let's drop a 1-line LINE-DELIMITED json. + // Write the JSON file to process into the 'process' directory. + f, err := os.Create(filepath.Join(processDirectory, testJSONFile)) + require.NoError(t, err) + _, err = f.WriteString("{\"Name\": \"event1\",\"Speed\": 100.1,\"Length\": 20.1}") + require.NoError(t, err) + err = f.Close() + require.NoError(t, err) + + err = r.Start(&acc) + r.Log = testutil.Logger{} + require.NoError(t, err) + err = r.Gather(&acc) + require.NoError(t, err) + acc.Wait(1) + r.Stop() + + // Verify that we read each JSON line once to a single metric. + require.Equal(t, len(acc.Metrics), 1) + for _, m := range acc.Metrics { + for key, value := range m.Tags { + require.Equal(t, r.FileTag, key) + require.Equal(t, filepath.Base(testJSONFile), value) + } + } +} diff --git a/plugins/inputs/file/README.md b/plugins/inputs/file/README.md index ef0fb90b0796c..8ec406da7be3d 100644 --- a/plugins/inputs/file/README.md +++ b/plugins/inputs/file/README.md @@ -20,8 +20,11 @@ plugin instead. ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" + ## Name a tag containing the name of the file the data was parsed from. Leave empty - ## to disable. + ## to disable. Be cautious when file name variation is high, as this can increase the cardinality + ## significantly. 
Read more about cardinality here: + ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality # file_tag = "" ``` diff --git a/plugins/inputs/file/file.go b/plugins/inputs/file/file.go index 22af282dbde0a..fbfc536a6d874 100644 --- a/plugins/inputs/file/file.go +++ b/plugins/inputs/file/file.go @@ -29,9 +29,13 @@ const sampleConfig = ` ## as well as ** to match recursive files and directories. files = ["/tmp/metrics.out"] + ## Name a tag containing the name of the file the data was parsed from. Leave empty - ## to disable. + ## to disable. Be cautious when file name variation is high, as this can increase the cardinality + ## significantly. Read more about cardinality here: + ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality # file_tag = "" + # ## Character encoding to use when interpreting the file contents. Invalid ## characters are replaced using the unicode replacement character. When set From 9de8c987f23c13aeb8cf1cbe365e6e55630d6df6 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Thu, 7 Oct 2021 15:41:59 -0500 Subject: [PATCH 62/81] fix: mute graylog UDP/TCP tests by marking them as integration (#9881) --- plugins/outputs/graylog/graylog_test.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/plugins/outputs/graylog/graylog_test.go b/plugins/outputs/graylog/graylog_test.go index faa5b34b908d7..fcf61ae77d51e 100644 --- a/plugins/outputs/graylog/graylog_test.go +++ b/plugins/outputs/graylog/graylog_test.go @@ -15,14 +15,26 @@ import ( ) func TestWriteDefault(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + scenarioUDP(t, "127.0.0.1:12201") } func TestWriteUDP(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + scenarioUDP(t, "udp://127.0.0.1:12201") } func TestWriteTCP(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + scenarioTCP(t, 
"tcp://127.0.0.1:12201") } From da5727e34cae3b85a4840c827575978092fd040b Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Thu, 7 Oct 2021 15:45:02 -0500 Subject: [PATCH 63/81] fix: duplicate line_protocol when using object and fields (#9872) --- plugins/parsers/json_v2/parser.go | 2 +- plugins/parsers/json_v2/parser_test.go | 4 ++ .../mix_field_and_object/expected.out | 1 + .../testdata/mix_field_and_object/input.json | 44 +++++++++++++++++++ .../mix_field_and_object/telegraf.conf | 15 +++++++ 5 files changed, 65 insertions(+), 1 deletion(-) create mode 100644 plugins/parsers/json_v2/testdata/mix_field_and_object/expected.out create mode 100644 plugins/parsers/json_v2/testdata/mix_field_and_object/input.json create mode 100644 plugins/parsers/json_v2/testdata/mix_field_and_object/telegraf.conf diff --git a/plugins/parsers/json_v2/parser.go b/plugins/parsers/json_v2/parser.go index f4f84c562e781..ebeb6545ba549 100644 --- a/plugins/parsers/json_v2/parser.go +++ b/plugins/parsers/json_v2/parser.go @@ -137,7 +137,7 @@ func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { metrics = append(metrics, cartesianProduct(tags, fields)...) if len(objects) != 0 && len(metrics) != 0 { - metrics = append(metrics, cartesianProduct(objects, metrics)...) + metrics = cartesianProduct(objects, metrics) } else { metrics = append(metrics, objects...) 
} diff --git a/plugins/parsers/json_v2/parser_test.go b/plugins/parsers/json_v2/parser_test.go index 3ef08856190ac..686bf826ad9d7 100644 --- a/plugins/parsers/json_v2/parser_test.go +++ b/plugins/parsers/json_v2/parser_test.go @@ -20,6 +20,10 @@ func TestData(t *testing.T) { name string test string }{ + { + name: "Test when using field and object together", + test: "mix_field_and_object", + }, { name: "Test complex nesting", test: "complex_nesting", diff --git a/plugins/parsers/json_v2/testdata/mix_field_and_object/expected.out b/plugins/parsers/json_v2/testdata/mix_field_and_object/expected.out new file mode 100644 index 0000000000000..e7f0e222418aa --- /dev/null +++ b/plugins/parsers/json_v2/testdata/mix_field_and_object/expected.out @@ -0,0 +1 @@ +openweather,id=2.643743e+06,name=London coord_lat=51.5085,coord_lon=-0.1257,description="few clouds",main_temp=12.54,summary="Clouds",wind_speed=2.11 1628186541000000000 diff --git a/plugins/parsers/json_v2/testdata/mix_field_and_object/input.json b/plugins/parsers/json_v2/testdata/mix_field_and_object/input.json new file mode 100644 index 0000000000000..402113af8ca9e --- /dev/null +++ b/plugins/parsers/json_v2/testdata/mix_field_and_object/input.json @@ -0,0 +1,44 @@ +{ + "coord": { + "lon": -0.1257, + "lat": 51.5085 + }, + "weather": [ + { + "id": 801, + "main": "Clouds", + "description": "few clouds", + "icon": "02n" + } + ], + "base": "stations", + "main": { + "temp": 12.54, + "feels_like": 11.86, + "temp_min": 10.49, + "temp_max": 14.27, + "pressure": 1024, + "humidity": 77 + }, + "visibility": 10000, + "wind": { + "speed": 2.11, + "deg": 254, + "gust": 4.63 + }, + "clouds": { + "all": 21 + }, + "dt": 1633545358, + "sys": { + "type": 2, + "id": 2019646, + "country": "GB", + "sunrise": 1633500560, + "sunset": 1633541256 + }, + "timezone": 3600, + "id": 2643743, + "name": "London", + "cod": 200 +} diff --git a/plugins/parsers/json_v2/testdata/mix_field_and_object/telegraf.conf 
b/plugins/parsers/json_v2/testdata/mix_field_and_object/telegraf.conf new file mode 100644 index 0000000000000..cc181960cbf1e --- /dev/null +++ b/plugins/parsers/json_v2/testdata/mix_field_and_object/telegraf.conf @@ -0,0 +1,15 @@ +[[inputs.file]] + files = ["./testdata/mix_field_and_object/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + measurement_name = "openweather" + [[inputs.file.json_v2.field]] + path = "weather.#.main" + rename = "summary" + [[inputs.file.json_v2.field]] + path = "weather.#.description" + [[inputs.file.json_v2.object]] + path = "@this" + included_keys = ["coord_lat", "coord_lon", "main_temp", "wind_speed"] # List of JSON keys (for a nested key, prepend the parent keys with underscores) that should be only included in result + tags = ["id", "name"] # List of JSON keys (for a nested key, prepend the parent keys with underscores) to be a tag instead of a field + From fde637464add3b2958560338cc3a29e0164021ba Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Thu, 7 Oct 2021 15:47:56 -0500 Subject: [PATCH 64/81] fix: migrate aws/credentials.go to use NewSession, same functionality but now supports error (#9878) --- config/aws/credentials.go | 15 +++++---- plugins/inputs/cloudwatch/cloudwatch.go | 6 +++- .../kinesis_consumer/kinesis_consumer.go | 31 ++++++++++++------- plugins/outputs/cloudwatch/cloudwatch.go | 18 ++++++----- .../cloudwatch_logs/cloudwatch_logs.go | 12 ++++--- plugins/outputs/kinesis/kinesis.go | 8 +++-- plugins/outputs/timestream/timestream.go | 28 ++++++++++------- plugins/outputs/timestream/timestream_test.go | 16 +++++----- 8 files changed, 83 insertions(+), 51 deletions(-) diff --git a/config/aws/credentials.go b/config/aws/credentials.go index d2c2b284817d8..7b75917393590 100644 --- a/config/aws/credentials.go +++ b/config/aws/credentials.go @@ -21,7 +21,7 @@ type CredentialConfig struct { WebIdentityTokenFile string `toml:"web_identity_token_file"` } -func (c 
*CredentialConfig) Credentials() client.ConfigProvider { +func (c *CredentialConfig) Credentials() (client.ConfigProvider, error) { if c.RoleARN != "" { return c.assumeCredentials() } @@ -29,7 +29,7 @@ func (c *CredentialConfig) Credentials() client.ConfigProvider { return c.rootCredentials() } -func (c *CredentialConfig) rootCredentials() client.ConfigProvider { +func (c *CredentialConfig) rootCredentials() (client.ConfigProvider, error) { config := &aws.Config{ Region: aws.String(c.Region), } @@ -42,11 +42,14 @@ func (c *CredentialConfig) rootCredentials() client.ConfigProvider { config.Credentials = credentials.NewSharedCredentials(c.Filename, c.Profile) } - return session.New(config) + return session.NewSession(config) } -func (c *CredentialConfig) assumeCredentials() client.ConfigProvider { - rootCredentials := c.rootCredentials() +func (c *CredentialConfig) assumeCredentials() (client.ConfigProvider, error) { + rootCredentials, err := c.rootCredentials() + if err != nil { + return nil, err + } config := &aws.Config{ Region: aws.String(c.Region), Endpoint: &c.EndpointURL, @@ -58,5 +61,5 @@ func (c *CredentialConfig) assumeCredentials() client.ConfigProvider { config.Credentials = stscreds.NewCredentials(rootCredentials, c.RoleARN) } - return session.New(config) + return session.NewSession(config) } diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go index 7dbd3c7faa7be..3fb86310946e1 100644 --- a/plugins/inputs/cloudwatch/cloudwatch.go +++ b/plugins/inputs/cloudwatch/cloudwatch.go @@ -288,7 +288,11 @@ func (c *CloudWatch) initializeCloudWatch() error { } loglevel := aws.LogOff - c.client = cwClient.New(c.CredentialConfig.Credentials(), cfg.WithLogLevel(loglevel)) + p, err := c.CredentialConfig.Credentials() + if err != nil { + return err + } + c.client = cwClient.New(p, cfg.WithLogLevel(loglevel)) // Initialize regex matchers for each Dimension value. 
for _, m := range c.Metrics { diff --git a/plugins/inputs/kinesis_consumer/kinesis_consumer.go b/plugins/inputs/kinesis_consumer/kinesis_consumer.go index 005ccdc43aab2..88b5fef660112 100644 --- a/plugins/inputs/kinesis_consumer/kinesis_consumer.go +++ b/plugins/inputs/kinesis_consumer/kinesis_consumer.go @@ -153,24 +153,31 @@ func (k *KinesisConsumer) SetParser(parser parsers.Parser) { } func (k *KinesisConsumer) connect(ac telegraf.Accumulator) error { - client := kinesis.New(k.CredentialConfig.Credentials()) + p, err := k.CredentialConfig.Credentials() + if err != nil { + return err + } + client := kinesis.New(p) k.checkpoint = &noopCheckpoint{} if k.DynamoDB != nil { - var err error + p, err := (&internalaws.CredentialConfig{ + Region: k.Region, + AccessKey: k.AccessKey, + SecretKey: k.SecretKey, + RoleARN: k.RoleARN, + Profile: k.Profile, + Filename: k.Filename, + Token: k.Token, + EndpointURL: k.EndpointURL, + }).Credentials() + if err != nil { + return err + } k.checkpoint, err = ddb.New( k.DynamoDB.AppName, k.DynamoDB.TableName, - ddb.WithDynamoClient(dynamodb.New((&internalaws.CredentialConfig{ - Region: k.Region, - AccessKey: k.AccessKey, - SecretKey: k.SecretKey, - RoleARN: k.RoleARN, - Profile: k.Profile, - Filename: k.Filename, - Token: k.Token, - EndpointURL: k.EndpointURL, - }).Credentials())), + ddb.WithDynamoClient(dynamodb.New(p)), ddb.WithMaxInterval(time.Second*10), ) if err != nil { diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go index ddf3622328ba2..85f9570b3d5ea 100644 --- a/plugins/outputs/cloudwatch/cloudwatch.go +++ b/plugins/outputs/cloudwatch/cloudwatch.go @@ -177,12 +177,12 @@ var sampleConfig = ` ## Namespace for the CloudWatch MetricDatums namespace = "InfluxData/Telegraf" - ## If you have a large amount of metrics, you should consider to send statistic - ## values instead of raw metrics which could not only improve performance but - ## also save AWS API cost. 
If enable this flag, this plugin would parse the required - ## CloudWatch statistic fields (count, min, max, and sum) and send them to CloudWatch. - ## You could use basicstats aggregator to calculate those fields. If not all statistic - ## fields are available, all fields would still be sent as raw metrics. + ## If you have a large amount of metrics, you should consider to send statistic + ## values instead of raw metrics which could not only improve performance but + ## also save AWS API cost. If enable this flag, this plugin would parse the required + ## CloudWatch statistic fields (count, min, max, and sum) and send them to CloudWatch. + ## You could use basicstats aggregator to calculate those fields. If not all statistic + ## fields are available, all fields would still be sent as raw metrics. # write_statistics = false ## Enable high resolution metrics of 1 second (if not enabled, standard resolution are of 60 seconds precision) @@ -198,7 +198,11 @@ func (c *CloudWatch) Description() string { } func (c *CloudWatch) Connect() error { - c.svc = cloudwatch.New(c.CredentialConfig.Credentials()) + p, err := c.CredentialConfig.Credentials() + if err != nil { + return err + } + c.svc = cloudwatch.New(p) return nil } diff --git a/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go b/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go index 79eb5d7722f13..f9ef289089363 100644 --- a/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go +++ b/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go @@ -108,12 +108,12 @@ region = "us-east-1" ## Cloud watch log group. Must be created in AWS cloudwatch logs upfront! ## For example, you can specify the name of the k8s cluster here to group logs from all cluster in oine place -log_group = "my-group-name" +log_group = "my-group-name" ## Log stream in log group ## Either log group name or reference to metric attribute, from which it can be parsed: ## tag: or field:. If log stream is not exist, it will be created. 
-## Since AWS is not automatically delete logs streams with expired logs entries (i.e. empty log stream) +## Since AWS is not automatically delete logs streams with expired logs entries (i.e. empty log stream) ## you need to put in place appropriate house-keeping (https://forums.aws.amazon.com/thread.jspa?threadID=178855) log_stream = "tag:location" @@ -126,7 +126,7 @@ log_data_metric_name = "docker_log" ## Specify from which metric attribute the log data should be retrieved: ## tag: or field:. ## I.e., if you are using docker_log plugin to stream logs from container, then -## specify log_data_source = "field:message" +## specify log_data_source = "field:message" log_data_source = "field:message" ` @@ -187,7 +187,11 @@ func (c *CloudWatchLogs) Connect() error { var logGroupsOutput = &cloudwatchlogs.DescribeLogGroupsOutput{NextToken: &dummyToken} var err error - c.svc = cloudwatchlogs.New(c.CredentialConfig.Credentials()) + p, err := c.CredentialConfig.Credentials() + if err != nil { + return err + } + c.svc = cloudwatchlogs.New(p) if c.svc == nil { return fmt.Errorf("can't create cloudwatch logs service endpoint") } diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go index 2e75788400ae0..412e3d9742b72 100644 --- a/plugins/outputs/kinesis/kinesis.go +++ b/plugins/outputs/kinesis/kinesis.go @@ -126,9 +126,13 @@ func (k *KinesisOutput) Connect() error { k.Log.Infof("Establishing a connection to Kinesis in %s", k.Region) } - svc := kinesis.New(k.CredentialConfig.Credentials()) + p, err := k.CredentialConfig.Credentials() + if err != nil { + return err + } + svc := kinesis.New(p) - _, err := svc.DescribeStreamSummary(&kinesis.DescribeStreamSummaryInput{ + _, err = svc.DescribeStreamSummary(&kinesis.DescribeStreamSummaryInput{ StreamName: aws.String(k.StreamName), }) k.svc = svc diff --git a/plugins/outputs/timestream/timestream.go b/plugins/outputs/timestream/timestream.go index 2c77c408e7357..42ea706afa9e6 100644 --- 
a/plugins/outputs/timestream/timestream.go +++ b/plugins/outputs/timestream/timestream.go @@ -57,7 +57,7 @@ const MaxRecordsPerCall = 100 var sampleConfig = ` ## Amazon Region region = "us-east-1" - + ## Amazon Credentials ## Credentials are loaded in the following order: ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified @@ -75,7 +75,7 @@ var sampleConfig = ` #role_session_name = "" #profile = "" #shared_credential_file = "" - + ## Endpoint to make request against, the correct endpoint is automatically ## determined and this option should only be set if you wish to override the ## default. @@ -88,7 +88,7 @@ var sampleConfig = ` ## Specifies if the plugin should describe the Timestream database upon starting ## to validate if it has access necessary permissions, connection, etc., as a safety check. - ## If the describe operation fails, the plugin will not start + ## If the describe operation fails, the plugin will not start ## and therefore the Telegraf agent will not start. describe_database_on_start = false @@ -97,17 +97,17 @@ var sampleConfig = ` ## For example, consider the following data in line protocol format: ## weather,location=us-midwest,season=summer temperature=82,humidity=71 1465839830100400200 ## airquality,location=us-west no2=5,pm25=16 1465839830100400200 - ## where weather and airquality are the measurement names, location and season are tags, + ## where weather and airquality are the measurement names, location and season are tags, ## and temperature, humidity, no2, pm25 are fields. 
## In multi-table mode: ## - first line will be ingested to table named weather ## - second line will be ingested to table named airquality ## - the tags will be represented as dimensions ## - first table (weather) will have two records: - ## one with measurement name equals to temperature, + ## one with measurement name equals to temperature, ## another with measurement name equals to humidity ## - second table (airquality) will have two records: - ## one with measurement name equals to no2, + ## one with measurement name equals to no2, ## another with measurement name equals to pm25 ## - the Timestream tables from the example will look like this: ## TABLE "weather": @@ -141,7 +141,7 @@ var sampleConfig = ` ## Specifies the Timestream table where the metrics will be uploaded. # single_table_name = "yourTableNameHere" - ## Only valid and required for mapping_mode = "single-table" + ## Only valid and required for mapping_mode = "single-table" ## Describes what will be the Timestream dimension name for the Telegraf ## measurement name. # single_table_dimension_name_for_telegraf_measurement_name = "namespace" @@ -169,9 +169,12 @@ var sampleConfig = ` ` // WriteFactory function provides a way to mock the client instantiation for testing purposes. 
-var WriteFactory = func(credentialConfig *internalaws.CredentialConfig) WriteClient { - configProvider := credentialConfig.Credentials() - return timestreamwrite.New(configProvider) +var WriteFactory = func(credentialConfig *internalaws.CredentialConfig) (WriteClient, error) { + configProvider, err := credentialConfig.Credentials() + if err != nil { + return nil, err + } + return timestreamwrite.New(configProvider), nil } func (t *Timestream) Connect() error { @@ -221,7 +224,10 @@ func (t *Timestream) Connect() error { t.Log.Infof("Constructing Timestream client for '%s' mode", t.MappingMode) - svc := WriteFactory(&t.CredentialConfig) + svc, err := WriteFactory(&t.CredentialConfig) + if err != nil { + return err + } if t.DescribeDatabaseOnStart { t.Log.Infof("Describing database '%s' in region '%s'", t.DatabaseName, t.Region) diff --git a/plugins/outputs/timestream/timestream_test.go b/plugins/outputs/timestream/timestream_test.go index 67cdb4495c1d8..868e24d745a9c 100644 --- a/plugins/outputs/timestream/timestream_test.go +++ b/plugins/outputs/timestream/timestream_test.go @@ -2,7 +2,6 @@ package timestream_test import ( "fmt" - "github.com/aws/aws-sdk-go/aws/awserr" "reflect" "sort" "strconv" @@ -10,6 +9,8 @@ import ( "testing" "time" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/timestreamwrite" "github.com/influxdata/telegraf" @@ -53,10 +54,9 @@ func (m *mockTimestreamClient) DescribeDatabase(*timestreamwrite.DescribeDatabas func TestConnectValidatesConfigParameters(t *testing.T) { assertions := assert.New(t) - ts.WriteFactory = func(credentialConfig *internalaws.CredentialConfig) ts.WriteClient { - return &mockTimestreamClient{} + ts.WriteFactory = func(credentialConfig *internalaws.CredentialConfig) (ts.WriteClient, error) { + return &mockTimestreamClient{}, nil } - // checking base arguments noDatabaseName := ts.Timestream{Log: testutil.Logger{}} 
assertions.Contains(noDatabaseName.Connect().Error(), "DatabaseName") @@ -182,11 +182,11 @@ func (m *mockTimestreamErrorClient) DescribeDatabase(*timestreamwrite.DescribeDa func TestThrottlingErrorIsReturnedToTelegraf(t *testing.T) { assertions := assert.New(t) - ts.WriteFactory = func(credentialConfig *internalaws.CredentialConfig) ts.WriteClient { + ts.WriteFactory = func(credentialConfig *internalaws.CredentialConfig) (ts.WriteClient, error) { return &mockTimestreamErrorClient{ awserr.New(timestreamwrite.ErrCodeThrottlingException, "Throttling Test", nil), - } + }, nil } plugin := ts.Timestream{ MappingMode: ts.MappingModeMultiTable, @@ -210,11 +210,11 @@ func TestThrottlingErrorIsReturnedToTelegraf(t *testing.T) { func TestRejectedRecordsErrorResultsInMetricsBeingSkipped(t *testing.T) { assertions := assert.New(t) - ts.WriteFactory = func(credentialConfig *internalaws.CredentialConfig) ts.WriteClient { + ts.WriteFactory = func(credentialConfig *internalaws.CredentialConfig) (ts.WriteClient, error) { return &mockTimestreamErrorClient{ awserr.New(timestreamwrite.ErrCodeRejectedRecordsException, "RejectedRecords Test", nil), - } + }, nil } plugin := ts.Timestream{ MappingMode: ts.MappingModeMultiTable, From 276bbc900461e1f33569952f7e737631714ca05b Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Thu, 7 Oct 2021 15:24:09 -0600 Subject: [PATCH 65/81] Update changelog (cherry picked from commit 3eab8d846e2337de731db51cbd36bb8586a59bd1) --- CHANGELOG.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7360ff5a687d3..8760b914b7f95 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,13 @@ +## v1.20.2 [2021-10-07] + +#### Bugfixes + + - [#9878](https://github.com/influxdata/telegraf/pull/9878) `inputs.cloudwatch` Use new session API + - [#9872](https://github.com/influxdata/telegraf/pull/9872) `parsers.json_v2` Duplicate line_protocol when using object and fields + - 
[#9787](https://github.com/influxdata/telegraf/pull/9787) `parsers.influx` Fix memory leak in influx parser + - [#9880](https://github.com/influxdata/telegraf/pull/9880) `inputs.stackdriver` Migrate to cloud.google.com/go/monitoring/apiv3/v2 + - [#9887](https://github.com/influxdata/telegraf/pull/9887) Fix makefile typo that prevented i386 tar and rpm packages from being built + ## v1.20.1 [2021-10-06] #### Bugfixes From 60211f0f1a800b2e3cad0f4ecf8823781b4dcfa3 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Mon, 11 Oct 2021 08:34:30 -0600 Subject: [PATCH 66/81] Fix/jaeger version (#9893) --- docs/LICENSE_OF_DEPENDENCIES.md | 11 +- go.mod | 80 ++-- go.sum | 645 +++++++++++++++++++++++++------- 3 files changed, 557 insertions(+), 179 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index b36594a1faea2..34a54d2d2fde9 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -83,7 +83,6 @@ following works: - github.com/go-stack/stack [MIT License](https://github.com/go-stack/stack/blob/master/LICENSE.md) - github.com/gobwas/glob [MIT License](https://github.com/gobwas/glob/blob/master/LICENSE) - github.com/gofrs/uuid [MIT License](https://github.com/gofrs/uuid/blob/master/LICENSE) -- github.com/gogo/googleapis [Apache License 2.0](https://github.com/gogo/googleapis/blob/master/LICENSE) - github.com/gogo/protobuf [BSD 3-Clause Clear License](https://github.com/gogo/protobuf/blob/master/LICENSE) - github.com/golang-jwt/jwt [MIT License](https://github.com/golang-jwt/jwt/blob/main/LICENSE) - github.com/golang-sql/civil [Apache License 2.0](https://github.com/golang-sql/civil/blob/master/LICENSE) @@ -105,7 +104,6 @@ following works: - github.com/gosnmp/gosnmp [BSD 2-Clause "Simplified" License](https://github.com/gosnmp/gosnmp/blob/master/LICENSE) - github.com/grid-x/modbus [BSD 3-Clause "New" or "Revised" License](https://github.com/grid-x/modbus/blob/master/LICENSE) - github.com/grid-x/serial [MIT 
License](https://github.com/grid-x/serial/blob/master/LICENSE) -- github.com/grpc-ecosystem/grpc-gateway [BSD 3-Clause "New" or "Revised" License](https://github.com/grpc-ecosystem/grpc-gateway/blob/master/LICENSE.txt) - github.com/hailocab/go-hostpool [MIT License](https://github.com/hailocab/go-hostpool/blob/master/LICENSE) - github.com/harlow/kinesis-consumer [MIT License](https://github.com/harlow/kinesis-consumer/blob/master/MIT-LICENSE) - github.com/hashicorp/consul/api [Mozilla Public License 2.0](https://github.com/hashicorp/consul/blob/master/LICENSE) @@ -133,9 +131,14 @@ following works: - github.com/jackc/pgx [MIT License](https://github.com/jackc/pgx/blob/master/LICENSE) - github.com/jaegertracing/jaeger [Apache License 2.0](https://github.com/jaegertracing/jaeger/blob/master/LICENSE) - github.com/james4k/rcon [MIT License](https://github.com/james4k/rcon/blob/master/LICENSE) +- github.com/jcmturner/aescts [Apache License 2.0](https://github.com/jcmturner/aescts/blob/master/LICENSE) +- github.com/jcmturner/dnsutils [Apache License 2.0](https://github.com/jcmturner/dnsutils/blob/master/LICENSE) - github.com/jcmturner/gofork [BSD 3-Clause "New" or "Revised" License](https://github.com/jcmturner/gofork/blob/master/LICENSE) +- github.com/jcmturner/gokrb5 [Apache License 2.0](https://github.com/jcmturner/gokrb5/blob/master/LICENSE) +- github.com/jcmturner/rpc [Apache License 2.0](https://github.com/jcmturner/rpc/blob/master/LICENSE) - github.com/jhump/protoreflect [Apache License 2.0](https://github.com/jhump/protoreflect/blob/master/LICENSE) - github.com/jmespath/go-jmespath [Apache License 2.0](https://github.com/jmespath/go-jmespath/blob/master/LICENSE) +- github.com/josharian/intern [MIT License](https://github.com/josharian/intern/blob/master/license.md) - github.com/jpillora/backoff [MIT License](https://github.com/jpillora/backoff/blob/master/LICENSE) - github.com/json-iterator/go [MIT License](https://github.com/json-iterator/go/blob/master/LICENSE) 
- github.com/kardianos/service [zlib License](https://github.com/kardianos/service/blob/master/LICENSE) @@ -253,10 +256,6 @@ following works: - gopkg.in/gorethink/gorethink.v3 [Apache License 2.0](https://github.com/rethinkdb/rethinkdb-go/blob/v3.0.5/LICENSE) - gopkg.in/inf.v0 [BSD 3-Clause "New" or "Revised" License](https://github.com/go-inf/inf/blob/v0.9.1/LICENSE) - gopkg.in/ini.v1 [Apache License 2.0](https://github.com/go-ini/ini/blob/master/LICENSE) -- gopkg.in/jcmturner/aescts.v1 [Apache License 2.0](https://github.com/jcmturner/aescts/blob/v1.0.1/LICENSE) -- gopkg.in/jcmturner/dnsutils.v1 [Apache License 2.0](https://github.com/jcmturner/dnsutils/blob/v1.0.1/LICENSE) -- gopkg.in/jcmturner/gokrb5.v7 [Apache License 2.0](https://github.com/jcmturner/gokrb5/tree/v7.5.0/LICENSE) -- gopkg.in/jcmturner/rpc.v1 [Apache License 2.0](https://github.com/jcmturner/rpc/blob/v1.1.0/LICENSE) - gopkg.in/ldap.v3 [MIT License](https://github.com/go-ldap/ldap/blob/v3.1.7/LICENSE) - gopkg.in/olivere/elastic.v5 [MIT License](https://github.com/olivere/elastic/blob/v5.0.76/LICENSE) - gopkg.in/tomb.v1 [BSD 3-Clause Clear License](https://github.com/go-tomb/tomb/blob/v1/LICENSE) diff --git a/go.mod b/go.mod index c6f3138489d28..bcb5f9af3a7af 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/Azure/azure-event-hubs-go/v3 v3.3.13 github.com/Azure/azure-kusto-go v0.3.2 github.com/Azure/azure-pipeline-go v0.2.3 // indirect - github.com/Azure/azure-sdk-for-go v51.1.0+incompatible // indirect + github.com/Azure/azure-sdk-for-go v52.5.0+incompatible // indirect github.com/Azure/azure-storage-blob-go v0.13.0 // indirect github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd github.com/Azure/go-amqp v0.13.12 // indirect @@ -32,8 +32,8 @@ require ( github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee github.com/Microsoft/go-winio v0.4.17 // indirect github.com/Microsoft/hcsshim v0.8.21 // indirect - github.com/Shopify/sarama v1.27.2 - 
github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46 // indirect + github.com/Shopify/sarama v1.29.1 + github.com/StackExchange/wmi v1.2.1 // indirect github.com/aerospike/aerospike-client-go v1.27.0 github.com/alecthomas/participle v0.4.1 // indirect github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15 @@ -92,29 +92,28 @@ require ( github.com/eapache/queue v1.1.0 // indirect github.com/echlebek/timeproxy v1.0.0 // indirect github.com/eclipse/paho.mqtt.golang v1.3.0 - github.com/fatih/color v1.9.0 // indirect + github.com/fatih/color v1.10.0 // indirect github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 github.com/go-logfmt/logfmt v0.5.0 github.com/go-logr/logr v0.4.0 // indirect - github.com/go-ole/go-ole v1.2.4 // indirect + github.com/go-ole/go-ole v1.2.5 // indirect github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c github.com/go-redis/redis v6.15.9+incompatible github.com/go-sql-driver/mysql v1.6.0 - github.com/go-stack/stack v1.8.0 // indirect + github.com/go-stack/stack v1.8.1 // indirect github.com/goburrow/modbus v0.1.0 // indirect github.com/goburrow/serial v0.1.0 // indirect github.com/gobwas/glob v0.2.3 github.com/gofrs/uuid v3.3.0+incompatible - github.com/gogo/googleapis v1.4.0 // indirect github.com/gogo/protobuf v1.3.2 github.com/golang-jwt/jwt/v4 v4.0.0 github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe // indirect github.com/golang/geo v0.0.0-20190916061304-5b978397cfec - github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 github.com/golang/snappy v0.0.3 - github.com/google/flatbuffers v1.11.0 // indirect + github.com/google/flatbuffers v1.12.0 // indirect github.com/google/go-cmp v0.5.6 github.com/google/go-github/v32 v32.1.0 github.com/google/go-querystring v1.0.0 // indirect @@ -123,7 +122,7 @@ 
require ( github.com/googleapis/gax-go/v2 v2.0.5 // indirect github.com/googleapis/gnostic v0.5.5 // indirect github.com/gopcua/opcua v0.2.0-rc2.0.20210409063412-baabb9b14fd2 - github.com/gorilla/mux v1.7.3 + github.com/gorilla/mux v1.8.0 github.com/gorilla/websocket v1.4.2 github.com/gosnmp/gosnmp v1.32.0 github.com/grid-x/modbus v0.0.0-20210224155242-c4a3d042e99b @@ -133,7 +132,7 @@ require ( github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 github.com/hashicorp/consul/api v1.11.0 github.com/hashicorp/go-cleanhttp v0.5.1 // indirect - github.com/hashicorp/go-hclog v0.12.2 // indirect + github.com/hashicorp/go-hclog v0.16.2 // indirect github.com/hashicorp/go-immutable-radix v1.2.0 // indirect github.com/hashicorp/go-msgpack v0.5.5 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect @@ -155,7 +154,7 @@ require ( github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8 // indirect github.com/jackc/pgtype v1.3.0 // indirect github.com/jackc/pgx/v4 v4.6.0 - github.com/jaegertracing/jaeger v1.15.1 // indirect + github.com/jaegertracing/jaeger v1.26.0 // indirect github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a github.com/jcmturner/gofork v1.0.0 // indirect github.com/jhump/protoreflect v1.8.3-0.20210616212123-6cc1efa697ca @@ -165,14 +164,13 @@ require ( github.com/kardianos/service v1.0.0 github.com/karrick/godirwalk v1.16.1 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 - github.com/klauspost/compress v1.13.1 // indirect - github.com/kr/pretty v0.2.1 // indirect + github.com/klauspost/compress v1.13.4 // indirect + github.com/kr/pretty v0.3.0 // indirect github.com/kr/text v0.2.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 // indirect - github.com/lib/pq v1.3.0 // indirect - github.com/mailru/easyjson v0.7.1 // indirect - github.com/mattn/go-colorable v0.1.6 // indirect + github.com/mailru/easyjson v0.7.7 
// indirect + github.com/mattn/go-colorable v0.1.8 // indirect github.com/mattn/go-ieproxy v0.0.1 // indirect github.com/mattn/go-isatty v0.0.12 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 @@ -183,7 +181,7 @@ require ( github.com/miekg/dns v1.1.43 github.com/minio/highwayhash v1.0.1 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mitchellh/mapstructure v1.2.2 // indirect + github.com/mitchellh/mapstructure v1.4.1 // indirect github.com/moby/ipvs v1.0.1 github.com/moby/sys/mount v0.2.0 // indirect github.com/moby/sys/mountinfo v0.4.1 // indirect @@ -209,7 +207,7 @@ require ( github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5 github.com/openzipkin/zipkin-go v0.2.5 github.com/philhofer/fwd v1.1.1 // indirect - github.com/pierrec/lz4 v2.5.2+incompatible // indirect + github.com/pierrec/lz4 v2.6.0+incompatible // indirect github.com/pion/dtls/v2 v2.0.9 github.com/pion/logging v0.2.2 // indirect github.com/pion/transport v0.12.3 // indirect @@ -219,10 +217,10 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_golang v1.11.0 github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.26.0 + github.com/prometheus/common v0.30.0 github.com/prometheus/procfs v0.6.0 - github.com/prometheus/prometheus v1.8.2-0.20200911110723-e83ef207b6c2 - github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect + github.com/prometheus/prometheus v1.8.2-0.20210430082741-2a4b8e12bbf2 + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect github.com/riemann/riemann-go-client v0.5.0 github.com/robertkrimen/otto v0.0.0-20191219234010-c382bd3c16ff // indirect @@ -236,8 +234,8 @@ require ( github.com/showwin/speedtest-go v1.1.4 github.com/signalfx/com_signalfx_metrics_protobuf v0.0.2 // indirect github.com/signalfx/gohistogram 
v0.0.0-20160107210732-1ccfd2ff5083 // indirect - github.com/signalfx/golib/v3 v3.3.34 - github.com/signalfx/sapm-proto v0.4.0 // indirect + github.com/signalfx/golib/v3 v3.3.38 + github.com/signalfx/sapm-proto v0.7.2 // indirect github.com/sirupsen/logrus v1.8.1 github.com/sleepinggenius2/gosmi v0.4.3 github.com/snowflakedb/gosnowflake v1.5.0 @@ -250,8 +248,8 @@ require ( github.com/tidwall/match v1.0.3 // indirect github.com/tidwall/pretty v1.1.0 // indirect github.com/tinylib/msgp v1.1.6 - github.com/tklauser/go-sysconf v0.3.5 // indirect - github.com/tklauser/numcpus v0.2.2 // indirect + github.com/tklauser/go-sysconf v0.3.9 // indirect + github.com/tklauser/numcpus v0.3.0 // indirect github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330 github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 // indirect github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae // indirect @@ -263,8 +261,8 @@ require ( github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/scram v1.0.2 // indirect github.com/xdg-go/stringprep v1.0.2 // indirect - github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c - github.com/xdg/stringprep v1.0.0 // indirect + github.com/xdg/scram v1.0.3 + github.com/xdg/stringprep v1.0.3 // indirect github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 // indirect go.etcd.io/etcd/api/v3 v3.5.0 // indirect @@ -275,15 +273,15 @@ require ( go.opentelemetry.io/otel/metric v0.23.0 go.opentelemetry.io/otel/sdk/metric v0.23.0 go.starlark.net v0.0.0-20210406145628-7a1108eaa012 - go.uber.org/atomic v1.7.0 // indirect + go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.6.0 // indirect golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e // indirect golang.org/x/mod v0.4.2 // indirect - golang.org/x/net v0.0.0-20210610132358-84b48f89b13b + golang.org/x/net v0.0.0-20210614182718-04defd469f4e golang.org/x/oauth2 
v0.0.0-20210805134026-6f1e6394065a golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069 - golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect + golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71 + golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d // indirect golang.org/x/text v0.3.6 golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect golang.org/x/tools v0.1.5 @@ -301,11 +299,7 @@ require ( gopkg.in/fsnotify.v1 v1.4.7 // indirect gopkg.in/gorethink/gorethink.v3 v3.0.5 gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/ini.v1 v1.42.0 // indirect - gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect - gopkg.in/jcmturner/dnsutils.v1 v1.0.1 // indirect - gopkg.in/jcmturner/gokrb5.v7 v7.5.0 // indirect - gopkg.in/jcmturner/rpc.v1 v1.1.0 // indirect + gopkg.in/ini.v1 v1.62.0 // indirect gopkg.in/ldap.v3 v3.1.0 gopkg.in/olivere/elastic.v5 v5.0.70 gopkg.in/sourcemap.v1 v1.0.5 // indirect @@ -314,11 +308,11 @@ require ( gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect gotest.tools v2.2.0+incompatible - k8s.io/api v0.20.6 + k8s.io/api v0.22.2 k8s.io/apimachinery v0.22.2 - k8s.io/client-go v0.20.6 + k8s.io/client-go v0.22.2 k8s.io/klog/v2 v2.9.0 // indirect - k8s.io/utils v0.0.0-20201110183641-67b214c5f920 // indirect + k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a // indirect modernc.org/cc/v3 v3.33.5 // indirect modernc.org/ccgo/v3 v3.9.4 // indirect modernc.org/libc v1.9.5 // indirect @@ -334,6 +328,12 @@ require ( require ( github.com/cenkalti/backoff/v4 v4.1.1 // indirect + github.com/jcmturner/aescts/v2 v2.0.0 // indirect + github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect + github.com/jcmturner/gokrb5/v8 v8.4.2 // indirect + github.com/jcmturner/rpc/v2 v2.0.3 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/rogpeppe/go-internal v1.6.2 // indirect go.opentelemetry.io/otel v1.0.0-RC3 // indirect 
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.23.0 // indirect go.opentelemetry.io/otel/internal/metric v0.23.0 // indirect diff --git a/go.sum b/go.sum index a8fc62a7b3874..de38213b94a7b 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,4 @@ +4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= @@ -38,6 +39,7 @@ cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM7 cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/kms v0.1.0 h1:VXAb5OzejDcyhFzIDeZ5n5AUdlsFnCyexuascIwWMj0= cloud.google.com/go/kms v0.1.0/go.mod h1:8Qp8PCAypHg4FdmlyW1QRAv09BGQ9Uzh7JnmIZxPk+c= cloud.google.com/go/monitoring v0.2.0 h1:UFQB1+YbZjAOqAFFY4RlNiOrt19O5HzPeCdtYSlPvmk= @@ -60,7 +62,9 @@ code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYB collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= collectd.org v0.5.0 h1:y4uFSAuOmeVhG3GCRa3/oH+ysePfO/+eGJNfd0Qa3d8= collectd.org v0.5.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= +contrib.go.opencensus.io/exporter/prometheus v0.3.0/go.mod h1:rpCPVQKhiyH8oomWgm34ZmgIdZa8OVYO5WAIygPbBBE= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod 
h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/Azure/azure-amqp-common-go/v3 v3.0.1 h1:mXh+eyOxGLBfqDtfmbtby0l7XfG/6b2NkuZ3B7i6zHA= github.com/Azure/azure-amqp-common-go/v3 v3.0.1/go.mod h1:PBIGdzcO1teYoufTKMcGibdKaYZv4avS+O6LNIp8bq0= github.com/Azure/azure-event-hubs-go/v3 v3.3.13 h1:aiI2RLjp0MzLCuFUXzR8b3h3bdPIc2c3vBYXRK8jX3E= @@ -74,9 +78,9 @@ github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVt github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v44.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v45.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v51.1.0+incompatible h1:7uk6GWtUqKg6weLv2dbKnzwb0ml1Qn70AdtRccZ543w= github.com/Azure/azure-sdk-for-go v51.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v52.5.0+incompatible h1:/NLBWHCnIHtZyLPc1P7WIqi4Te4CC23kIQyK3Ep/7lA= +github.com/Azure/azure-sdk-for-go v52.5.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y= github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= github.com/Azure/azure-storage-blob-go v0.13.0 h1:lgWHvFh+UYBNVQLFHXkvul2f6yOPA9PIH82RTG2cSwc= @@ -96,7 +100,7 @@ github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8 github.com/Azure/go-autorest/autorest v0.10.0/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest v0.11.3/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= 
-github.com/Azure/go-autorest/autorest v0.11.4/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest v0.11.17/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest v0.11.18 h1:90Y4srNYrwOtAgVo3ndrQkTYn6kf1Eg/AjTFJ8Is2aM= github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= @@ -127,10 +131,8 @@ github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= -github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= @@ -145,7 +147,13 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/datadog-go 
v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/DataDog/zstd v1.4.4/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= +github.com/HdrHistogram/hdrhistogram-go v0.9.0/go.mod h1:nxrse8/Tzg2tg3DZcZjm6qEclQKK70g0KxO61gFFZD4= +github.com/HdrHistogram/hdrhistogram-go v1.0.1/go.mod h1:BWJ+nMSHY3L41Zj7CA3uXnloDp7xxV0YvstAE7nKTaM= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee h1:atI/FFjXh6hIVlPE1Jup9m8N4B9q/OSbMUe2EBahs+w= github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee/go.mod h1:jDA6v0TUYrFEIAE5uGJ29LQOeONIgMdP4Rkqb8HUnPM= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= @@ -185,20 +193,22 @@ github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb0 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc 
v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/sarama v1.27.2 h1:1EyY1dsxNDUQEv0O/4TsjosHI2CgB1uo9H/v56xzTxc= -github.com/Shopify/sarama v1.27.2/go.mod h1:g5s5osgELxgM+Md9Qni9rzo7Rbt+vvFQI4bt/Mc93II= +github.com/Shopify/sarama v1.22.2-0.20190604114437-cd910a683f9f/go.mod h1:XLH1GYJnLVE0XCr6KdJGVJRTwY30moWNJ4sERjXX6fs= +github.com/Shopify/sarama v1.29.0/go.mod h1:2QpgD79wpdAESqNQMxNc0KYMkycd4slxGdV3TWSVqrU= +github.com/Shopify/sarama v1.29.1 h1:wBAacXbYVLmWieEA/0X/JagDdCZ8NVFOfS6l6+2u5S0= +github.com/Shopify/sarama v1.29.1/go.mod h1:mdtqvCSg8JOxk8PmpTNGyo6wzd4BMm4QXSfDnTXmgkE= github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= -github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46 h1:5sXbqlSomvdjlRbWyNqkPsJ3Fg+tQZCbgeX1VGljbQY= github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= +github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/a8m/tree v0.0.0-20210115125333-10a5fd5b637d/go.mod h1:FSdwKX97koS5efgm8WevNf7XS3PqtyFkKDDXrz778cg= github.com/aerospike/aerospike-client-go v1.27.0 
h1:VC6/Wqqm3Qlp4/utM7Zts3cv4A2HPn8rVFp/XZKTWgE= @@ -220,11 +230,13 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15 h1:AUNCr9CiJuwrRYS3XieqF+Z9B9gNxo/eANAJCF2eiN4= github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= +github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= github.com/aliyun/alibaba-cloud-sdk-go v1.61.1004 h1:YtaYjXmemIMyySUbs0VGFPqsLpsNHf4TW/L6yqpJQ9s= github.com/aliyun/alibaba-cloud-sdk-go v1.61.1004/go.mod h1:pUKYbK5JQ+1Dfxk80P0qxGqe5dkxDoabbZS7zOcouyA= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 h1:FXrPTd8Rdlc94dKccl7KPmdmIbVh/OjelJ8/vgMRzcQ= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RDr2QLWRmLH1R1ZA4RInpmvOzDDXtaIZkc= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= github.com/antchfx/jsonquery v1.1.4 h1:+OlFO3QS9wjU0MKx9MgHm5f6o6hdd4e9mUTp0wTjxlM= github.com/antchfx/jsonquery v1.1.4/go.mod h1:cHs8r6Bymd8j6HI6Ej1IJbjahKvLBcIEh54dfmo+E9A= github.com/antchfx/xmlquery v1.3.6 h1:kaEVzH1mNo/2AJZrhZjAaAUTy2Nn2zxGfYYU8jWfXOo= @@ -233,12 +245,15 @@ github.com/antchfx/xpath v1.1.7/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNY github.com/antchfx/xpath v1.1.10/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk= github.com/antchfx/xpath v1.1.11 h1:WOFtK8TVAjLm3lbgqeP0arlHpvCEeTANeWZ/csPpJkQ= github.com/antchfx/xpath v1.1.11/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/antihax/optional 
v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/antonmedv/expr v1.8.9/go.mod h1:5qsM3oLGDND7sDmQGDXHkYfkjYMUX14qsgqmHhwGEk8= github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230 h1:5ultmol0yeX75oh1hY78uAFn3dupBQ/QUNxERCkiaUQ= github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.14.1/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.14.2 h1:hY4rAyg7Eqbb27GB6gkhUKrRAuc8xRjlNtJq+LseKeY= github.com/apache/thrift v0.14.2/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 h1:Bmjk+DjIi3tTAU0wxGaFbfjGUqlxxSXARq9A96Kgoos= @@ -256,11 +271,16 @@ github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6l github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/ashanbrown/forbidigo v1.1.0/go.mod 
h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI= +github.com/ashanbrown/makezero v0.0.0-20201205152432-7b7cdbb3025a/go.mod h1:oG9Dnez7/ESBqc4EdrdNlryeo7d0KcW1ftXHm7nU/UU= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.34.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= +github.com/aws/aws-sdk-go v1.38.3/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.38.69 h1:V489lmrdkIQSfF6OAGZZ1Cavcm7eczCm2JcGvX+yHRg= github.com/aws/aws-sdk-go v1.38.69/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= @@ -302,10 +322,14 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2ioR0= github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/bketelsen/crypt v0.0.4/go.mod 
h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= +github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI= github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/bmatcuk/doublestar/v3 v3.0.0 h1:TQtVPlDnAYwcrVNB2JiGuMc++H5qzWZd9PhkNo5WyHI= @@ -314,7 +338,9 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4Yn github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/bombsimon/wsl/v3 v3.2.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/bsm/sarama-cluster v2.1.13+incompatible/go.mod h1:r7ao+4tTNXvWm+VRpRJchr2kQhqxgmAp2iEX5W96gMM= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= @@ -340,10 +366,12 @@ github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8 github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod 
h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/charithe/durationcheck v0.0.6/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -395,7 +423,6 @@ github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go. github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= @@ -463,9 +490,12 @@ github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgU github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= 
+github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -485,22 +515,27 @@ github.com/couchbase/gomemcached v0.1.3 h1:HIc5qMYNbuhB7zNaiEtj61DCYkquAwrQlf64q github.com/couchbase/gomemcached v0.1.3/go.mod h1:mxliKQxOv84gQ0bJWbI+w9Wxdpt9HjDvgW9MjCym5Vo= github.com/couchbase/goutils v0.1.0 h1:0WLlKJilu7IBm98T8nS9+J36lBFVLRUSIUtyD/uWpAE= github.com/couchbase/goutils v0.1.0/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/crossdock/crossdock-go v0.0.0-20160816171116-049aabb0122b/go.mod h1:v9FBN7gdVTpiD/+LZ7Po0UKvROyT87uLVxTHVky/dlQ= 
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= +github.com/daixiang0/gci v0.2.8/go.mod h1:+4dZ7TISfSmqfAGv59ePaHfNzgGtIkHAhhdKggP1JAc= github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= +github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892/go.mod h1:CTDl0pzVzE5DEzZhPfvhY/9sPFMQIxaJ9VAMs9AagrE= +github.com/denis-tingajkin/go-header v0.4.2/go.mod h1:eLRHAVXzE5atsKAnNRDB90WHCFFnBUn4RN0nRcs1LJA= github.com/denisenkom/go-mssqldb v0.10.0 h1:QykgLZBorFE95+gO3u9esLd0BmbvpWp0/waNNZfHBM8= github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= @@ -508,13 +543,20 @@ github.com/devigned/tab v0.0.1/go.mod h1:oVYrfgGyond090gxCvvbjZji79+peOiSV6vhZhK github.com/devigned/tab v0.1.1 h1:3mD6Kb1mUOYeLpJvTVSDwSg5ZsfSxfvxGRTxRsJsITA= github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= github.com/devigned/tab/opencensus v0.1.2/go.mod h1:U6xXMXnNwXJpdaK0mnT3zdng4WTi+vCfqn7YHofEv2A= 
+github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= +github.com/dgraph-io/badger/v3 v3.2103.1/go.mod h1:dULbq6ehJ5K0cGW/1TQ9iSfUk0gbSiToDWmWmTsJ53E= +github.com/dgraph-io/ristretto v0.0.1/go.mod h1:T40EBc7CJke8TkpiYfGGKAeFjSaxuFXhuXRyumBd6RE= +github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/digitalocean/godo v1.42.1/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= +github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/digitalocean/godo v1.58.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= @@ -525,7 +567,7 @@ 
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.7+incompatible h1:Z6O9Nhsjv+ayUEeI1IojKbYcsGdgYSNqxe1s2MYzUhQ= github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= @@ -583,8 +625,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v0.0.0-20200808040245-162e5629780b/go.mod h1:NAJj0yf/KaRKURN6nyi7A9IZydMivZEm9oQLWNjfKDc= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/esimonov/ifshort v1.0.1/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/facebookgo/stack 
v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= @@ -592,8 +633,11 @@ github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqL github.com/facebookgo/stackerr v0.0.0-20150612192056-c2fcf88613f4 h1:fP04zlkPjAGpsduG7xN3rRkxjAqkJaIQnnkNYYw/pAk= github.com/facebookgo/stackerr v0.0.0-20150612192056-c2fcf88613f4/go.mod h1:SBHk9aNQtiw4R4bEuzHjVmZikkUKCnO1v3lPQ21HZGk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg= +github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= +github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= +github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c= @@ -602,22 +646,29 @@ github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8 github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= -github.com/frankban/quicktest v1.10.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= +github.com/frankban/quicktest v1.7.3/go.mod h1:V1d2J5pfxYH6EjBAgSK7YNXcXlTWxUHdE1sVDXkjnig= github.com/frankban/quicktest v1.11.3 
h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= +github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E= github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= +github.com/gdamore/tcell v1.3.0/go.mod h1:Hjvr+Ofd+gLglo7RYKxxnzCBmev3BzsS67MebKS4zMM= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod 
h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= +github.com/go-critic/go-critic v0.5.4/go.mod h1:cjB4YGw+n/+X8gREApej7150Uyy1Tg8If6F2XOAUXNE= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -635,8 +686,9 @@ github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= +github.com/go-ole/go-ole v1.2.5 h1:t4MGB5xEDZvXI+0rMjjsfBsD7yAgp/s9ZDkL1JndXwY= +github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= @@ -644,21 +696,30 @@ github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9sn github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= github.com/go-openapi/analysis v0.19.10/go.mod 
h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ= +github.com/go-openapi/analysis v0.19.16/go.mod h1:GLInF007N83Ad3m8a/CbQ5TPzdnGT7workfHwuVjNVk= +github.com/go-openapi/analysis v0.20.0/go.mod h1:BMchjvaHDykmRMsK40iPtvyOfFdMMxlOmQr9FBZk+Og= +github.com/go-openapi/analysis v0.20.1/go.mod h1:BMchjvaHDykmRMsK40iPtvyOfFdMMxlOmQr9FBZk+Og= github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= github.com/go-openapi/errors v0.19.4/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/errors v0.19.6/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.19.7/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.0/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.1/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= 
+github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= @@ -666,17 +727,28 @@ github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI= github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY= +github.com/go-openapi/loads v0.19.6/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= +github.com/go-openapi/loads v0.19.7/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= +github.com/go-openapi/loads v0.20.0/go.mod h1:2LhKquiE513rN5xC6Aan6lYOSddlL8Mp20AW9kpviM4= +github.com/go-openapi/loads v0.20.2/go.mod h1:hTVUotJ+UonAMMZsvakEgmWKgtulweO9vYP2bQYKA/o= github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= github.com/go-openapi/runtime v0.19.15/go.mod 
h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiSjahULvYmlv98= +github.com/go-openapi/runtime v0.19.24/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk= +github.com/go-openapi/runtime v0.19.28/go.mod h1:BvrQtn6iVb2QmiVXRsFAm6ZCAZBpbVKFfN6QWCp582M= github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/spec v0.19.15/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= +github.com/go-openapi/spec v0.20.0/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= +github.com/go-openapi/spec v0.20.1/go.mod h1:93x7oh+d+FQsmsieroS4cmR3u0p/ywH649a3qwC9OsQ= +github.com/go-openapi/spec v0.20.2/go.mod h1:RW6Xcbs6LOyWLU/mXGdzn2Qc+3aj+ASfI7rvSZh1Vls= +github.com/go-openapi/spec v0.20.3/go.mod h1:gG4F8wdEDN+YPBMVnzE85Rbhf+Th2DTvA9nFPQ5AYEg= github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= @@ -684,19 +756,33 @@ github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6 github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= github.com/go-openapi/strfmt v0.19.4/go.mod 
h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= +github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= +github.com/go-openapi/strfmt v0.20.1/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk= +github.com/go-openapi/strfmt v0.20.2/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= +github.com/go-openapi/swag v0.19.12/go.mod h1:eFdyEBkTdoAf/9RXBvj4cr1nH7GD8Kzo5HTt47gr72M= +github.com/go-openapi/swag v0.19.13/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= +github.com/go-openapi/validate v0.19.10/go.mod 
h1:RKEZTUWDkxKQxN2jDT7ZnZi2bhZlbNMAuKvKB+IaGx8= +github.com/go-openapi/validate v0.19.12/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0waH08tGe6kAQ4= +github.com/go-openapi/validate v0.19.15/go.mod h1:tbn/fdOwYHgrhPBzidZfJC2MIVvs9GA7monOmWBbeCI= +github.com/go-openapi/validate v0.20.1/go.mod h1:b60iJT+xNNLfaQJUqLI7946tYiFEOuE9E4k54HpKcJ0= +github.com/go-openapi/validate v0.20.2/go.mod h1:e7OJoKNgd0twXZwIn0A43tHbvIcr/rZIVCbJBpTUoY0= github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c h1:fWdhUpCuoeNIPiQ+pkAmmERYEjhVx5/cbVGK7T99OkI= github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c/go.mod h1:35JbSyV/BYqHwwRA6Zr1uVDm1637YlNOU61wI797NPI= +github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= +github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg= github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= @@ -704,8 +790,21 @@ github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= +github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= +github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= +github.com/go-toolsmith/astcopy 
v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= +github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= +github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= +github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= +github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= +github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= +github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= +github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= +github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= +github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= @@ -736,20 +835,21 @@ github.com/goburrow/serial v0.1.0 h1:v2T1SQa/dlUqQiYIT8+Cu7YolfqAi3K96UmhwYyuSrA github.com/goburrow/serial v0.1.0/go.mod h1:sAiqG0nRVswsm1C97xsttiYCzSLBmUZ/VSlVLZJ8haA= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gocql/gocql v0.0.0-20200228163523-cd4b606dd2fb/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY= github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus 
v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE= github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= -github.com/gogo/googleapis v1.3.1/go.mod h1:d+q1s/xVJxZGKWwC/6UfPIF33J+G1Tq4GYv9Y+Tg/EU= -github.com/gogo/googleapis v1.4.0 h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI= github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= +github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -772,8 +872,9 @@ github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache 
v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -783,7 +884,6 @@ github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v0.0.0-20170307001533-c9c7427a2a70/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -803,16 +903,29 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy 
v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/addlicense v0.0.0-20190510175307-22550fa7c1b0/go.mod h1:QtPG26W17m+OIQgE6gQ24gC1M6pUaMBAbFrTIDtwG/E= +github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= +github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= +github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= +github.com/golangci/golangci-lint v1.38.0/go.mod h1:Knp/sd5ATrVp7EOzWzwIIFH+c8hUfpW+oOQb8NvdZDo= +github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= +github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= +github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= +github.com/golangci/revgrep v0.0.0-20210208091834-cd28932614b5/go.mod h1:LK+zW4MpyytAWQRz0M4xnzEk50lSvqDQKfx304apFkY= +github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= +github.com/google/addlicense v0.0.0-20200906110928-a0294312aa76/go.mod h1:EMjYTRimagHs1FwlIqKyX3wAM0u3rA+McvlIIWmSamA= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0 
h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/flatbuffers v1.11.0 h1:O7CEyB8Cb3/DmtxODGtLHcEvpr81Jm5qLg/hsHnxA2A= +github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v1.12.0 h1:/PtAHvnBY4Kqnx/xCQ3OIV9uYcSFGScBsWI3Oogeh6w= +github.com/google/flatbuffers v1.12.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -850,6 +963,7 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210323184331-8eee2492667d/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= @@ -862,32 +976,43 @@ github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/googleapis/gax-go/v2 v2.0.4/go.mod 
h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.4.0/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= +github.com/gookit/color v1.3.6/go.mod h1:R3ogXq2B9rTbXoSHJ1HyUVAZ3poOJHpd9nQmyGZsfvQ= github.com/gopcua/opcua v0.2.0-rc2.0.20210409063412-baabb9b14fd2 h1:OtFKr0Kwe1oLpMR+uNMh/DPgC5fxAq4xRe6HBv8LDqQ= github.com/gopcua/opcua v0.2.0-rc2.0.20210409063412-baabb9b14fd2/go.mod h1:a6QH4F9XeODklCmWuvaOdL8v9H0d73CEKUHWVZLQyE8= -github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gophercloud/gophercloud v0.12.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= +github.com/gophercloud/gophercloud v0.16.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= +github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254/go.mod h1:M9mZEtGIsR1oDaZagNPNG9iq9n2HrhZ17dsXk73V3Lw= 
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gosnmp/gosnmp v1.32.0 h1:gctewmZx5qFI0oHMzRnjETqIZ093d9NgZy9TQr3V0iA= github.com/gosnmp/gosnmp v1.32.0/go.mod h1:EIp+qkEpXoVsyZxXKy0AmXQx0mCHMMcIhXXvNDMpgF0= +github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw= 
+github.com/gostaticanalysis/analysisutil v0.4.1/go.mod h1:18U/DLpRgIUd459wGxVHE0fRgmo1UgHDcbw7F5idXu0= +github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI= +github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= +github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= +github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grid-x/modbus v0.0.0-20210224155242-c4a3d042e99b h1:Y4xqzO0CDNoehCr3ncgie3IgFTO9AzV8PMMEWESFM5c= github.com/grid-x/modbus v0.0.0-20210224155242-c4a3d042e99b/go.mod h1:YaK0rKJenZ74vZFcSSLlAQqtG74PMI68eDjpDCDDmTw= @@ -895,22 +1020,27 @@ github.com/grid-x/serial v0.0.0-20191104121038-e24bc9bf6f08 h1:syBxnRYnSPUDdkdo5 github.com/grid-x/serial v0.0.0-20191104121038-e24bc9bf6f08/go.mod h1:kdOd86/VGFWRrtkNwf1MPk0u1gIjc4Y7R2j7nhwc7Rk= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw= 
+github.com/grpc-ecosystem/grpc-gateway v1.14.5/go.mod h1:UJ0EZAp832vCd54Wev9N1BMKEyvcZ5+IM0AwDrnlkEc= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 h1:U0KvGD9CJIl1nbgu9yLsfWxMT6WqL8fG0IBB7RvOZZQ= github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0/go.mod h1:dk23l2BruuUzRP8wbybQbPn3J7sZga2QHICCeaEy5rQ= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/api v1.6.0/go.mod h1:1NSuaUUkFaJzMasbfq/11wKYWSR67Xn6r2DXKhuDNFg= +github.com/hashicorp/consul/api v1.8.1/go.mod h1:sDjTOq0yUyv5G4h+BqSea7Fn6BU+XbolEz1952UB+mk= github.com/hashicorp/consul/api v1.11.0 h1:Hw/G8TtRvOElqxVIhBzXciiSTbapq8hZ2XKZsXk5ZCE= github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.6.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= +github.com/hashicorp/consul/sdk v0.7.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/consul/sdk v0.8.0 h1:OJtKBtEjboEZvG6AOUdh4Z1Zbyu0WcxQ0qatRrZHTVU= github.com/hashicorp/consul/sdk v0.8.0/go.mod 
h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -920,8 +1050,11 @@ github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v0.12.2 h1:F1fdYblUEsxKiailtkhCCG2g4bipEgaHiDc8vffNpD4= -github.com/hashicorp/go-hclog v0.12.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.15.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.16.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= +github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.2.0 h1:l6UW37iCXwZkZoAbEYnptSHVE/cQ5bOTPYG5W3vf9+8= github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= @@ -932,6 +1065,8 @@ github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1: github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-plugin v1.4.0/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= 
+github.com/hashicorp/go-plugin v1.4.2/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= @@ -958,10 +1093,11 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p github.com/hashicorp/memberlist v0.2.2 h1:5+RffWKwqJ71YPu9mWsF7ZOscZmwfasdA8kbdC7AO2g= github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/serf v0.9.3/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/hashicorp/serf v0.9.5 h1:EBWvyu9tcRszt3Bxp3KNssBMP1KuHWyO51lz9+786iM= github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= -github.com/hetznercloud/hcloud-go v1.21.1/go.mod h1:xng8lbDUg+xM1dgc0yGHX5EeqbwIq7UYlMWMTx3SQVg= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20190923154419-df201c70410d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hetznercloud/hcloud-go v1.24.0/go.mod h1:3YmyK8yaZZ48syie6xpm3dt26rtB6s65AisBHylXYFA= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -974,10 +1110,10 @@ github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/apcupsd 
v0.0.0-20210427145308-694d5caead0e h1:3J1OB4RDKwXs5l8uEV6BP/tucOJOPDQysiT7/9cuXzA= github.com/influxdata/apcupsd v0.0.0-20210427145308-694d5caead0e/go.mod h1:WYK/Z/aXq9cbMFIL5ihcA4sX/r/3/WCas/Qvs/2fXcA= -github.com/influxdata/flux v0.65.0/go.mod h1:BwN2XG2lMszOoquQaFdPET8FRQfrXiZsWmcMO9rkaVY= +github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= github.com/influxdata/go-syslog/v3 v3.0.0 h1:jichmjSZlYK0VMmlz+k4WeOQd7z745YLsvGMqwtYt4I= github.com/influxdata/go-syslog/v3 v3.0.0/go.mod h1:tulsOp+CecTAYC27u9miMgq21GqXRW6VdKbOG+QSP4Q= -github.com/influxdata/influxdb v1.8.2/go.mod h1:SIzcnsjaHRFpmlxpJ4S3NT64qtEKYweNTUMb/vh0OMQ= +github.com/influxdata/influxdb v1.8.4/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= github.com/influxdata/influxdb-observability/common v0.2.7 h1:C+oDh8Kbw+Ykx9yog/uJXL27rwMN3hgTLQfAFg1eQO0= github.com/influxdata/influxdb-observability/common v0.2.7/go.mod h1:+8VMGrfWZnXjc1c/oP+N4O/sHoneWgN3ojAHwgYgV4A= github.com/influxdata/influxdb-observability/influx2otel v0.2.7 h1:YIXH+qNQgAtTA5U3s/wxDxxh5Vz+ylhZhyuRxtfTBqs= @@ -985,7 +1121,7 @@ github.com/influxdata/influxdb-observability/influx2otel v0.2.7/go.mod h1:ASyDMo github.com/influxdata/influxdb-observability/otel2influx v0.2.7 h1:FACov3tcGCKfEGXsyUbgUOQx3zXffXaCFbN3ntAzh1E= github.com/influxdata/influxdb-observability/otel2influx v0.2.7/go.mod h1:tE3OSy4RyAHIjxYlFZBsWorEM3aqaUeqSx3mbacm8KI= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/influxdata/influxql v1.1.0/go.mod h1:KpVI7okXjK6PRi3Z5B+mtKZli+R1DnZgb3N+tzevNgo= +github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8= 
github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= @@ -1039,27 +1175,46 @@ github.com/jackc/pgx/v4 v4.6.0/go.mod h1:vPh43ZzxijXUVJ+t/EmXBtFmbFVO72cuneCT9oA github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jaegertracing/jaeger v1.15.1 h1:7QzNAXq+4ko9GtCjozDNAp2uonoABu+B2Rk94hjQcp4= -github.com/jaegertracing/jaeger v1.15.1/go.mod h1:LUWPSnzNPGRubM8pk0inANGitpiMOOxihXx0+53llXI= +github.com/jaegertracing/jaeger v1.22.0/go.mod h1:WnwW68MjJEViSLRQhe0nkIsBDaF3CzfFd8wJcpJv24k= +github.com/jaegertracing/jaeger v1.23.0/go.mod h1:gB6Qc+Kjd/IX1G82oGTArbHI3ZRO//iUkaMW+gzL9uw= +github.com/jaegertracing/jaeger v1.26.0 h1:4LbUdb9l/Mx83zYvjLbkrayheX+Aga26NEI+feo3xzA= +github.com/jaegertracing/jaeger v1.26.0/go.mod h1:SwHsl1PLZVAdkQTPrziQ+4xV9FxzJXRvTDW1YrUIWEA= github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a h1:JxcWget6X/VfBMKxPIc28Jel37LGREut2fpV+ObkwJ0= github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a/go.mod h1:1qNVsDcmNQDsAXYfUuF/Z0rtK5eT8x9D6Pi7S3PjXAg= +github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= +github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= +github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= +github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= +github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= 
+github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= +github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= +github.com/jcmturner/gokrb5/v8 v8.4.2 h1:6ZIM6b/JJN0X8UM43ZOM6Z4SJzla+a/u7scXFJzodkA= +github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= +github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= +github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jgautheron/goconst v1.4.0/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= +github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= github.com/jhump/protoreflect v1.8.3-0.20210616212123-6cc1efa697ca h1:a0GZUdb+qnutF8shJxr2qs2qT3fnF+ptxTxPB8+oIvk= github.com/jhump/protoreflect v1.8.3-0.20210616212123-6cc1efa697ca/go.mod h1:7GcYQDdMU/O/BBrl/cX6PNHpXh6cenjd8pneu5yW7Tg= +github.com/jingyugao/rowserrcheck v0.0.0-20210130005344-c6a0c12dd98d/go.mod h1:/EZlaYCnEX24i7qdVhT9du5JrtFWYRQr67bVgR7JJC8= +github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= 
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= @@ -1085,6 +1240,7 @@ github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVE github.com/juju/testing v0.0.0-20191001232224-ce9dec17d28b/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/julz/importas v0.0.0-20210226073942-60b4fa260dd0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= github.com/kardianos/service v1.0.0 h1:HgQS3mFfOlyntWX8Oke98JcJLqt1DBcHR4kxShpYef0= @@ -1098,65 +1254,86 @@ 
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:C github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/errcheck v1.6.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.13.1 h1:wXr2uRxZTJXHLly6qhJabee5JqIhTRoLBhDOA74hDEQ= -github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.4 h1:0zhec2I8zGnjWcKyLl6i3gPqKANCCn5e9xmviEEeX6s= +github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/crc32 
v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kulti/thelper v0.4.0/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U= +github.com/kunwardeep/paralleltest v1.0.2/go.mod h1:ZPqNm1fVHPllh5LPVujzbVz1JN2GhLxSfY+oqUsvG30= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= 
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg= github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353 h1:X/79QL0b4YJVO5+OsPH9rF2u428CIrGL/jLmPsoOQQ4= github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353/go.mod h1:N0SVk0uhy+E1PZ3C9ctsPRlvOPAFPkCNlcPBDkt0N3U= +github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 h1:bCiVCRCs1Heq84lurVinUPy19keqGEe4jh5vtK37jcg= github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg= +github.com/leoluk/perflib_exporter v0.1.0/go.mod h1:rpV0lYj7lemdTm31t7zpCqYqPnw7xs86f+BaaNBVYFM= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU= -github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.9.0 h1:L8nSXQQzAYByakOFMTwpjRoHsMJklur4Gi59b6VivR8= +github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= +github.com/lucasb-eyer/go-colorful v1.0.2/go.mod h1:0MS4r+7BZKSJ5mw4/S5MPN+qHFF1fYclkSPilDOKW0s= +github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/lyft/protoc-gen-validate 
v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magefile/mage v1.10.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mailru/easyjson v0.7.1 h1:mdxE1MF9o53iCb2Ghj1VfWvh7ZOwHpnVG/xwXrV90U8= github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU= github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= +github.com/matoous/godox 
v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI= github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= @@ -1172,26 +1349,34 @@ github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHX github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.11.0/go.mod 
h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg= github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= +github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= github.com/mdlayher/genetlink v1.0.0 h1:OoHN1OdyEIkScEmRgxLEe2M9U8ClMytqA5niynLtfj0= github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc= github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= github.com/mdlayher/netlink v1.1.0 h1:mpdLgm+brq10nI9zM1BpX1kpDbh3NLl3RSnVq6ZSkfg= github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= +github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= +github.com/mgechev/revive v1.0.3/go.mod h1:POGGZagSo/0frdr7VeAifzS5Uka0d0GPiM35MsTO8nE= github.com/microsoft/ApplicationInsights-Go v0.4.4 h1:G4+H9WNs6ygSCe6sUyxRc2U81TI5Es90b2t/MwX5KqY= github.com/microsoft/ApplicationInsights-Go v0.4.4/go.mod h1:fKRUseBqkw6bDiXTs3ESTiU/4YTIHsQS4W3fP2ieF4U= github.com/miekg/dns 
v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= @@ -1205,6 +1390,8 @@ github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXx github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= @@ -1212,9 +1399,14 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4 github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.2.2 h1:dxe5oCinTXiTIcfgmZecdCzPmAJKd46KsCWc35r0TV4= github.com/mitchellh/mapstructure 
v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= +github.com/mjibson/esc v0.2.0/go.mod h1:9Hw9gxxfHulMF5OJKCyhYD7PzlSdhzXyaGEBRPH1OPs= github.com/moby/ipvs v1.0.1 h1:aoZ7fhLTXgDbzVrAnvV+XbKOU8kOET7B3+xULDF/1o0= github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= @@ -1240,9 +1432,12 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mozilla/tls-observatory v0.0.0-20190404164649-a3c1b6cfecfd/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= +github.com/mozilla/tls-observatory v0.0.0-20201209171846-0547674fceff/go.mod 
h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= github.com/multiplay/go-ts3 v1.0.0 h1:loxtEFqvYtpoGh1jOqEt6aDzctYuQsi3vb3dMpvWiWw= @@ -1252,6 +1447,7 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5wEBSYHI2c= github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= @@ -1274,11 +1470,15 @@ github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8= github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= +github.com/nbutton23/zxcvbn-go v0.0.0-20201221231540-e56b841a3c88/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/newrelic/newrelic-telemetry-sdk-go v0.5.1 h1:9YEHXplqlVkOltThchh+RxeODvTb1TBvQ1181aXg3pY= github.com/newrelic/newrelic-telemetry-sdk-go 
v0.5.1/go.mod h1:2kY6OeOxrJ+RIQlVjWDc/pZlT3MIf30prs6drzMfJ6E= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nishanths/exhaustive v0.1.0/go.mod h1:S1j9110vxV1ECdCudXRkeMnFQ/DQk9ajLT0Uf2MYZQQ= github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso= +github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB9sbB1usJ+xjQE= github.com/nsqio/go-nsq v1.0.8 h1:3L2F8tNLlwXXlp2slDUrUWSBn2O3nMh8R1/KEDFTHPk= github.com/nsqio/go-nsq v1.0.8/go.mod h1:vKq36oyeVXgsS5Q8YEO7WghqidAVXQlcFxzQbQTuDEY= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= @@ -1288,6 +1488,9 @@ github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQ github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= +github.com/olivere/elastic v6.2.35+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8= +github.com/olivere/elastic v6.2.37+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8= github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -1297,18 +1500,22 @@ github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.14.2 h1:8mVmC9kjFFmA8H4pKMUhcblgifdkOIXPvbhN1T36q1M= +github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= +github.com/onsi/gomega v1.10.4 h1:NiTx7EEvBzu9sFOD1zORteLSt3o8gnlvZZwSE9TnY9U= +github.com/onsi/gomega v1.10.4/go.mod h1:g/HbgYopi++010VEqkFgJHKC09uJiW9UkXvMUuKHUCQ= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029 h1:lXQqyLroROhwR2Yq/kXbLzVecgmVeZh2TFLg6OxCd+w= github.com/openconfig/gnmi 
v0.0.0-20180912164834-33a1865c3029/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc= @@ -1350,8 +1557,10 @@ github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqi github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= +github.com/opentracing-contrib/go-grpc v0.0.0-20191001143057-db30781987df/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 h1:lM6RxxfUMrYL/f8bWEUqdXrANWtrL7Nndbm9iFN0DlU= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -1366,27 +1575,35 @@ github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnh github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.5 h1:UwtQQx2pyPIgWYHRg+epgdx1/HnBQTgN3/oIYEJTQzU= github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE= +github.com/ory/go-acc v0.2.6/go.mod h1:4Kb/UnPcT8qRAk3IAxta+hvVapdxTLWtrr7bFLlEgpw= +github.com/ory/viper v1.7.5/go.mod h1:ypOuyJmEUb3oENywQZRgeAMwqgOyDqwboO1tj3DjTaM= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe 
v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= -github.com/pavius/impi v0.0.0-20180302134524-c1cbdcb8df2b/go.mod h1:x/hU0bfdWIhuOT1SKwiJg++yvkk6EuOtJk8WtDZqgr8= +github.com/pavius/impi v0.0.3/go.mod h1:x/hU0bfdWIhuOT1SKwiJg++yvkk6EuOtJk8WtDZqgr8= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= +github.com/pelletier/go-toml v1.6.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs= github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= +github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= +github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ= github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= 
+github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= -github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A= +github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pion/dtls/v2 v2.0.9 h1:7Ow+V++YSZQMYzggI0P9vLJz/hUFcffsfGMfT/Qy+u8= github.com/pion/dtls/v2 v2.0.9/go.mod h1:O0Wr7si/Zj5/EBFlDzDd6UtVxx25CE1r7XM7BQKYQho= github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= @@ -1404,23 +1621,31 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polyfloyd/go-errorlint v0.0.0-20201127212506-19bd8db6546f/go.mod 
h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= github.com/prometheus/alertmanager v0.21.0/go.mod h1:h7tJ81NA0VLWvWEayi1QltevFkLF3KxmC/malTcT8Go= github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= +github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= 
github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -1433,6 +1658,7 @@ github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -1440,11 +1666,17 @@ github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+ github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.13.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= +github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.23.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q= 
+github.com/prometheus/common v0.25.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.30.0 h1:JEkYlQnpzrzQFxi6gnukFPdQ+ac82oRhzMcIduJu/Ug= +github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/exporter-toolkit v0.5.1/go.mod h1:OCkM4805mmisBhLmVFw858QYi3v0wKdY6/UxrT0pZVg= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -1458,17 +1690,27 @@ github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/prometheus v1.8.2-0.20200911110723-e83ef207b6c2 h1:IB/5RJRcJiR/YzKs4Aou86s/RaMepZOZVCArYNHJHWc= -github.com/prometheus/prometheus v1.8.2-0.20200911110723-e83ef207b6c2/go.mod h1:Td6hjwdXDmVt5CI9T03Sw+yBNxLBq/Yx3ZtmtP8zlCA= +github.com/prometheus/prometheus v1.8.2-0.20210430082741-2a4b8e12bbf2 h1:AHi2TGs09Mv4v688/bjcY2PfAcu9+p4aPvsgVQ4nYDk= +github.com/prometheus/prometheus v1.8.2-0.20210430082741-2a4b8e12bbf2/go.mod 
h1:5aBj+GpLB+V5MCnrKm5+JAqEJwzDiLugOmDhgt7sDec= +github.com/prometheus/statsd_exporter v0.20.0/go.mod h1:YL3FWCG8JBBtaUSxAg4Gz2ZYu22bS84XM89ZQXXTWmQ= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= +github.com/quasilyte/go-ruleguard v0.3.0/go.mod h1:p2miAhLp6fERzFNbcuQ4bevXs8rgK//uCHsUDkumITg= +github.com/quasilyte/go-ruleguard/dsl v0.0.0-20210106184943-e47d54850b18/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/go-ruleguard/dsl v0.0.0-20210115110123-c73ee1cbff1f/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc= +github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= -github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/retailnext/hllpp 
v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= github.com/riemann/riemann-go-client v0.5.0 h1:yPP7tz1vSYJkSZvZFCsMiDsHHXX57x8/fEX3qyEXuAA= github.com/riemann/riemann-go-client v0.5.0/go.mod h1:FMiaOL8dgBnRfgwENzV0xlYJ2eCbV1o7yqVwOBLbShQ= +github.com/rivo/tview v0.0.0-20200219210816-cd38d7432498/go.mod h1:6lkG1x+13OShEf0EaOCaTQYyB7d5nSbb181KtjlS+84= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/robertkrimen/otto v0.0.0-20191219234010-c382bd3c16ff h1:+6NUiITWwE5q1KO6SAfUX918c+Tab0+tGAM/mtdlUyA= github.com/robertkrimen/otto v0.0.0-20191219234010-c382bd3c16ff/go.mod h1:xvqspoSXJTIpemEonrMDFq6XzwHYYgToXWj5eRX1OtY= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= @@ -1478,50 +1720,64 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.6.2 h1:aIihoIOHCiLZHxyoNQ+ABL4NKhFTgKLBdMLyEAh98m0= +github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= 
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryancurrah/gomodguard v1.2.0/go.mod h1:rNqbC4TOIdUDcVMSIpNNAzTbzXAZa6W5lnUepvuMMgQ= +github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 h1:gvolwzuDhul9qK6/oHqxCHD5TEYfsWNBGidOeG6kvpk= github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= -github.com/samuel/go-zookeeper v0.0.0-20190810000440-0ceca61e4d75/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e h1:CGjiMQ0wMH4wtNWrlj6kiTbkPt2F3rbYnhGX6TWLfco= github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu9FXAw2W4= +github.com/sanposhiho/wastedassign v0.1.3/go.mod h1:LGpq5Hsv74QaqM47WtIsRSF/ik9kqk07kchgv66tLVE= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44/go.mod h1:CJJ5VAbozOl0yEw7nHB9+7BXTJbIn6h7W+f6Gau5IP8= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= 
+github.com/securego/gosec v0.0.0-20200203094520-d13bb6d2420c/go.mod h1:gp0gaHj0WlmPh9BdsTmo1aq6C27yIPWdxCKGFGdVKBE= +github.com/securego/gosec/v2 v2.6.1/go.mod h1:I76p3NTHBXsGhybUW+cEQ692q2Vp+A0Z6ZLzDIZy+Ao= github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/sensu/sensu-go/api/core/v2 v2.9.0 h1:NanHMIWbrHP/L4Ge0V1x2+0G9bxFHpvhwjdr3wSF9Vg= github.com/sensu/sensu-go/api/core/v2 v2.9.0/go.mod h1:QcgxKxydmScE66hLBTzbFhhiPSR/JHqUjNi/+Lelh6E= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shirou/gopsutil v2.18.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= +github.com/shirou/gopsutil v3.21.5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil v3.21.8+incompatible h1:sh0foI8tMRlCidUJR+KzqWYWxrkuuPIGiO6Vp+KXdCU= github.com/shirou/gopsutil v3.21.8+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= +github.com/shirou/gopsutil/v3 v3.21.1/go.mod h1:igHnfak0qnw1biGeI2qKQvu0ZkwvEkUcCLlYhZzdr/4= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 h1:Pm6R878vxWWWR+Sa3ppsLce/Zq+JNTs6aVvRu13jv9A= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/showwin/speedtest-go v1.1.4 h1:pcY1W5LYZu44lH6Fuu80nu/Pj67n//VArlZudbAgR6E= github.com/showwin/speedtest-go v1.1.4/go.mod h1:dJugxvC/AQDt4HQQKZ9lKNa2+b1c8nzj9IL0a/F8l1U= +github.com/shurcooL/go 
v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= -github.com/shurcooL/vfsgen v0.0.0-20200627165143-92b8a710ab6c/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= +github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/signalfx/com_signalfx_metrics_protobuf v0.0.2 h1:X886QgwZH5qr9HIQkk3mWcNEhUxx6D8rUZumzLV4Wiw= github.com/signalfx/com_signalfx_metrics_protobuf v0.0.2/go.mod h1:tCQQqyJAVF1+mxNdqOi18sS/zaSrE6EMyWwRA2QTl70= github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083 h1:WsShHmu12ZztYPfh9b+I+VjYD1o8iOHhB67WZCMEEE8= github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083/go.mod h1:adPDS6s7WaajdFBV9mQ7i0dKfQ8xiDnF9ZNETVPpp7c= -github.com/signalfx/golib/v3 v3.3.34 h1:s78S24+exS0jH21oeSB1qPeiekIKkeXGv0hg7f67HvU= -github.com/signalfx/golib/v3 v3.3.34/go.mod h1:PB7OovVijH7OGhzMewarEcIZG3eG6akWMDucIb5Jnb4= +github.com/signalfx/golib/v3 v3.3.38 h1:4EukKPAxVsqlkfaetUv+BpbuJ2l0YeQbwiQg3ADtlzU= +github.com/signalfx/golib/v3 v3.3.38/go.mod h1:J7vY30VdC39CSin5ZRIrThnkyNW8x1fnJGD+NBW4LuY= github.com/signalfx/gomemcache v0.0.0-20180823214636-4f7ef64c72a9/go.mod h1:Ytb8KfCSyuwy/VILnROdgCvbQLA5ch0nkbG7lKT0BXw= -github.com/signalfx/sapm-proto v0.4.0 h1:5lQX++6FeIjUZEIcnSgBqhOpmSjMkRBW3y/4ZiKMo5E= -github.com/signalfx/sapm-proto v0.4.0/go.mod h1:x3gtwJ1GRejtkghB4nYpwixh2zqJrLbPU959ZNhM0Fk= +github.com/signalfx/sapm-proto v0.7.2 h1:iM/y3gezQm1/j7JBS0gXhEJ8ROeneb6DY7n0OcnvLks= 
+github.com/signalfx/sapm-proto v0.7.2/go.mod h1:HLufOh6Gd2altGxbeve+s6hh0EWCWoOM7MmuYuvs5PI= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -1533,6 +1789,7 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.0/go.mod h1:4GuYW9TZmE769R5STWrRakJc4UqQ3+QQ95fyz7ENv1A= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sleepinggenius2/gosmi v0.4.3 h1:99Zwzy1Cvgsh396sw07oR2G4ab88ILGZFMxSlGWnR6o= @@ -1540,21 +1797,31 @@ github.com/sleepinggenius2/gosmi v0.4.3/go.mod h1:l8OniPmd3bJzw0MXP2/qh7AhP/e+bT github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smartystreets/goconvey v1.6.4-0.20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/snowflakedb/gosnowflake v1.5.0 h1:Md7P8zbPegXy0+/SZ2nG8whXYkAT44nQ/yEb35LlIKo= 
github.com/snowflakedb/gosnowflake v1.5.0/go.mod h1:1kyg2XEduwti88V11PKRHImhXLK5WpGiayY6lFNYb98= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0/go.mod 
h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -1562,7 +1829,13 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= +github.com/ssgreg/nlreturn/v2 v2.1.0/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ -1574,7 +1847,9 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod 
h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -1584,15 +1859,18 @@ github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 h1:Oj2e7Sae4XrOsk3ij21QjjEgAcVSeo9nkp0dI//cD2o= github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod h1:qUzPVlSj2UgxJkVbH0ZwuuiR46U8RBMDT5KLY78Ifpw= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod 
h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= github.com/testcontainers/testcontainers-go v0.11.1 h1:FiYsB83LSGbiawoV8TpAZGfcCUbtaeeg1SXqEKUxh08= github.com/testcontainers/testcontainers-go v0.11.1/go.mod h1:/V0UVq+1e7NWYoqTPog179clf0Qp9TOyp4EcXaEFQz8= +github.com/tetafro/godot v1.4.4/go.mod h1:FVDd4JuKliW3UgjswZfJfHq4vAx0bD/Jd5brJjGeaz4= github.com/tidwall/gjson v1.9.0 h1:+Od7AE26jAaMgVC31cQV/Ope5iKXulNMflrlB7k+F9E= github.com/tidwall/gjson v1.9.0/go.mod h1:5/xDoumyyDNerp2U36lyolv46b3uF/9Bu6OfyQ9GImk= github.com/tidwall/match v1.0.3 h1:FQUVvBImDutD8wJLN6c5eMzWtjgONK9MwIBCOrUJKeE= @@ -1600,27 +1878,45 @@ github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JT github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.1.0 h1:K3hMW5epkdAVwibsQEfR/7Zj0Qgt4DxtNumTq/VloO8= github.com/tidwall/pretty v1.1.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.6 h1:i+SbKraHhnrf9M5MYmvQhFnbLhAXSDWF8WWsuyRdocw= github.com/tinylib/msgp v1.1.6/go.mod h1:75BAfg2hauQhs3qedfdDZmWAPcFMAvJE5b9rGOMufyw= -github.com/tklauser/go-sysconf v0.3.5 h1:uu3Xl4nkLzQfXNsWn15rPc/HQCJKObbt1dKJeWp3vU4= github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= -github.com/tklauser/numcpus v0.2.2 h1:oyhllyrScuYI6g+h/zUvNXNp1wy7x8qQy3t/piefldA= +github.com/tklauser/go-sysconf v0.3.9 h1:JeUVdAOWhhxVcU6Eqr/ATFHgXk/mmiItdKeJPev3vTo= +github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= +github.com/tklauser/numcpus v0.3.0 h1:ILuRUQBtssgnxw0XXIjKUC56fgnOrFoQQ/4+DeU2biQ= +github.com/tklauser/numcpus v0.3.0/go.mod 
h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tomarrell/wrapcheck v0.0.0-20201130113247-1683564d9756/go.mod h1:yiFB6fFoV7saXirUGfuK+cPtUh4NX/Hf5y2WC2lehu0= +github.com/tommy-muehle/go-mnd/v2 v2.3.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/uber/jaeger-client-go v2.29.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= +github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod 
h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/uudashr/gocognit v1.0.1/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.16.0/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA= +github.com/valyala/quicktemplate v1.6.3/go.mod h1:fwPzK2fHuYEODzJ9pkw0ipCPNHZ2tD5KW4lOuSdPKzY= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330 h1:iBlTJosRsR70amr0zsmSPvaKNH8K/p3YlX/5SdPmSl8= github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330/go.mod h1:7+aWBsUJCo9OQRCgTypRmIQW9KKKcPMjtrdnYIBsS70= github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec/go.mod h1:owBmyHYMLkxyrugmfwE/DLJyW8Ro9mkphwuVErQ0iUw= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= +github.com/vektra/mockery v0.0.0-20181123154057-e78b021dcbb5/go.mod h1:ppEjwdhyy7Y31EnHRDm1JkChoC7LXIJ7Ex0VYLWtZtQ= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 h1:cPXZWzzG0NllBLdjWoD1nDfaqu98YMv+OneaKc8sPOA= @@ -1634,6 +1930,7 @@ github.com/vjeantet/grok v1.0.1/go.mod h1:ax1aAchzC6/QMXMcyzHQGZWaW1l195+uMYIkCW github.com/vmware/govmomi v0.26.0 h1:JMZR5c7MHH3nCEAVYS3WyRIA35W3+b3tLwAqxVzq1Rw= github.com/vmware/govmomi v0.26.0/go.mod h1:daTuJEcQosNMXYJOeku0qdBJP9SOLLWB3Mqz8THtv6o= github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9oS4Wk2s2u4tS29nEaDLdzvuHdB19CvSGJjPgkZJNk= +github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad/go.mod 
h1:Hy8o65+MXnS6EwGElrSRjUzQDLXreJlzYLlWiHtt8hM= github.com/wavefronthq/wavefront-sdk-go v0.9.7 h1:SrtABcXXeKCW5SerQYsnCzHo15GeggjZmL+DjtTy6CI= github.com/wavefronthq/wavefront-sdk-go v0.9.7/go.mod h1:JTGsu+KKgxx+GitC65VVdftN2iep1nVpQi/8EGR6v4Y= github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= @@ -1649,11 +1946,13 @@ github.com/xdg-go/scram v1.0.2 h1:akYIkZ28e6A96dkWNJQu3nmCzH3YfwMPQExUYDaRv7w= github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= github.com/xdg-go/stringprep v1.0.2 h1:6iq84/ryjjeRmMJwxutI51F2GIPlP5BfTvXHeYjyhBc= github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= -github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/scram v1.0.3 h1:nTadYh2Fs4BK2xdldEa2g5bbaZp0/+1nJMMPtPxS/to= +github.com/xdg/scram v1.0.3/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= -github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/xdg/stringprep v1.0.3 h1:cmL5Enob4W83ti/ZHuZLuKD/xqJfus4fVPwE+/BDm+4= +github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= @@ -1677,18 +1976,25 @@ github.com/yvasiyarov/newrelic_platform_go 
v0.0.0-20140908184405-b21fdbd4370f/go github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489 h1:1JFLBqwIgdyHN1ZtgjTBwO+blA6gVOmZurpiMEsETKo= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.etcd.io/etcd/api/v3 v3.5.0 h1:GsV3S+OfZEOCNXdtNkBSR7kgLobAa/SO6tCxRa0GAYw= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= go.mongodb.org/mongo-driver v1.3.2/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= +go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= +go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= +go.mongodb.org/mongo-driver v1.5.1/go.mod 
h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= +go.mongodb.org/mongo-driver v1.5.2/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= go.mongodb.org/mongo-driver v1.5.3 h1:wWbFB6zaGHpzguF3f7tW94sVE8sFl3lHx8OZx/4OuFI= go.mongodb.org/mongo-driver v1.5.3/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= @@ -1702,6 +2008,8 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/collector v0.28.0 h1:XmRwoSj3HZtC7O/12fBoQ9DInvwBwFHgHLZrwNxNjQY= +go.opentelemetry.io/collector v0.28.0/go.mod h1:AP/BTXwo1eedoJO7V+HQ68CSvJU1lcdqOzJCgt1VsNs= go.opentelemetry.io/collector/model v0.35.0 h1:NpKjghiqlei4ecwjOYOMhD6tj4gY8yiWHPJmbFs/ArI= go.opentelemetry.io/collector/model v0.35.0/go.mod h1:+7YCSjJG+MqiIFjauzt7oM2qkqBsaJWh5hcsO4fwsAc= go.opentelemetry.io/otel v1.0.0-RC3 h1:kvwiyEkiUT/JaadXzVLI/R1wDO934A7r3Bs2wEe6wqA= @@ -1731,25 +2039,32 @@ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q= go.uber.org/goleak v1.1.10/go.mod 
h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -1762,19 +2077,22 @@ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e h1:gsTQYXdTw2Gq7RBsWvlQ91b+aEQ6bXFUngBGuR8sPpI= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= @@ -1791,6 +2109,7 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod 
h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -1818,7 +2137,7 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1846,13 +2165,13 @@ golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1860,12 +2179,14 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -1876,21 +2197,28 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201201195509-5d6afe98e0b7/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210324051636-2c4c8ecb7826/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net 
v0.0.0-20210331212208-0fccb6fa2b5c/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210427231257-85d9c07bbe3a/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210610132358-84b48f89b13b h1:k+E048sYJHyVnsr1GDrRZWQ32D2C7lWs9JRc0bel53A= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1902,6 +2230,8 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210323180902-22b0adad7558/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a h1:4Kd8OPUx1xgUwrHDaviWZO8MsgoZTZYC3g+8m16RBww= @@ -1919,7 +2249,6 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1928,14 +2257,15 @@ golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1950,6 +2280,8 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626150813-e07cf5db2756/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1970,7 +2302,6 @@ golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1995,14 +2326,13 @@ golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200821140526-fda516888d29/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200821140526-fda516888d29/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2010,6 +2340,7 @@ golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201024232916-9f70ab9862d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2019,20 +2350,25 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210314195730-07df6a141424/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2045,11 +2381,13 @@ golang.org/x/sys v0.0.0-20210611083646-a4fc73990273/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069 h1:siQdpVirKtzPhKl3lZWozZraCFObP8S1v6PRp0bLrtU= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= +golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71 h1:ikCpsnYR+Ew0vu99XlDp55lGgDJdIMx3f4a18jfse/s= +golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2065,21 +2403,27 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181112210238-4b1f3b6b1646/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools 
v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -2097,27 +2441,29 @@ golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools 
v0.0.0-20190906203814-12febf440ab1/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190916130336-e45ffcd953cc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191203134012-c197fd4bf371/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools 
v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200203023011-6f24f261dadb/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -2125,7 +2471,11 @@ golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools 
v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -2133,17 +2483,34 @@ golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200625211823-6506e20df31f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200717024301-6ddee64345a6/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200822203824-307de81be3f4/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools 
v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201011145850-ed2f50202694/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201028025901-8cd080b735b3/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201114224030-61ea331ec02b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201118003311-bd56c0adb394/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201230224404-63754364767c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools 
v0.0.0-20210102185154-773b96fafca2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210104081019-d8d6ddbec6ee/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= @@ -2194,7 +2561,9 @@ google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.42.0/go.mod h1:+Oj4s6ch2SEGtPjGqfUfZonBH0GjQH89gTeKKAEGZKI= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= @@ -2211,6 +2580,7 @@ google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= 
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -2222,6 +2592,7 @@ google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= @@ -2237,6 +2608,7 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto 
v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -2245,7 +2617,6 @@ google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEY google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200815001618-f69a88009b70/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -2257,6 +2628,7 @@ google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210312152112-fc591d9ea70f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= @@ -2273,6 +2645,7 @@ google.golang.org/genproto 
v0.0.0-20210824181836-a4879c3d0e89/go.mod h1:eFjDcFEc google.golang.org/genproto v0.0.0-20210827211047-25e5f791fe06 h1:Ogdiaj9EMVKYHnDsESxwlTr/k5eqCdwoQVJEcdg0NbE= google.golang.org/genproto v0.0.0-20210827211047-25e5f791fe06/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -2332,7 +2705,6 @@ gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= @@ -2347,22 +2719,23 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod 
h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= gopkg.in/gorethink/gorethink.v3 v3.0.5 h1:e2Uc/Xe+hpcVQFsj6MuHlYog3r0JYpnTzwDj/y2O4MU= gopkg.in/gorethink/gorethink.v3 v3.0.5/go.mod h1:+3yIIHJUGMBK+wyPH+iN5TP+88ikFDfZdqTlK3Y9q8I= gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.42.0 h1:7N3gPTt50s8GuLortA00n8AqRTk75qOP98+mTPpgzRk= gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.52.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= +gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= -gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM= gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= -gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI= gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= -gopkg.in/jcmturner/gokrb5.v7 v7.5.0 h1:a9tsXlIDD9SKxotJMK3niV7rPZAJeX2aD/0yg3qlIrg= -gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= -gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= +gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod 
h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= gopkg.in/ldap.v3 v3.1.0 h1:DIDWEjI7vQWREh0S8X5/NFPCZ3MCVd55LmXKPW4XLGE= gopkg.in/ldap.v3 v3.1.0/go.mod h1:dQjCc0R0kfyFjIlWNMH1DORwUASZyDxo2Ry1B51dXaQ= @@ -2388,12 +2761,12 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -2410,25 +2783,31 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.18.8/go.mod h1:d/CXqwWv+Z2XEG1LgceeDmHQwpUJhROPx16SlxJgERY= 
+honnef.co/go/tools v0.1.1/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +honnef.co/go/tools v0.1.2/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +honnef.co/go/tools v0.2.0/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= +honnef.co/go/tools v0.2.1/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= -k8s.io/api v0.20.6 h1:bgdZrW++LqgrLikWYNruIKAtltXbSCX2l5mJu11hrVE= k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= -k8s.io/apimachinery v0.18.8/go.mod h1:6sQd+iHEqmOtALqOFjSWp2KZ9F0wlU/nWm0ZgsYWMig= +k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU= +k8s.io/api v0.22.2 h1:M8ZzAD0V6725Fjg53fKeTJxGsJvRbk4TEm/fexHMtfw= +k8s.io/api v0.22.2/go.mod h1:y3ydYpLJAaDI+BbSe2xmGcqxiWHmWjkEeIbiwHvnPR8= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= +k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= k8s.io/apimachinery v0.22.2 h1:ejz6y/zNma8clPVfNDLnPbleBo6MpoFy/HBiBqCouVk= k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= -k8s.io/client-go v0.18.8/go.mod h1:HqFqMllQ5NnQJNwjro9k5zMyfhZlOwpuTLVrxjkYSxU= k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= -k8s.io/client-go v0.20.6 h1:nJZOfolnsVtDtbGJNCxzOtKUAu7zvXjB8+pMo9UNxZo= k8s.io/client-go v0.20.6/go.mod 
h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= +k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA= +k8s.io/client-go v0.22.2 h1:DaSQgs02aCC1QcwUdkKZWOeaVsQjYvWv8ZazcZ6JcHc= +k8s.io/client-go v0.22.2/go.mod h1:sAlhrkVDf50ZHx6z4K0S40wISNTarf1r800F+RlCF6U= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= @@ -2436,24 +2815,21 @@ k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM= k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod 
h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= -k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a h1:8dYfu/Fc9Gz2rNJKB9IQRGgQOh2clmRzNIPPY1xLY5g= +k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= modernc.org/cc/v3 v3.32.4/go.mod h1:0R6jl1aZlIl2avnYfbfHBS1QB6/f+16mihBObaBC878= modernc.org/cc/v3 v3.33.5 h1:gfsIOmcv80EelyQyOHn/Xhlzex8xunhQxWiJRMYmPrI= modernc.org/cc/v3 v3.33.5/go.mod h1:0R6jl1aZlIl2avnYfbfHBS1QB6/f+16mihBObaBC878= @@ -2483,16 +2859,19 @@ modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/z v1.0.1-0.20210308123920-1f282aa71362/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA= modernc.org/z v1.0.1 h1:WyIDpEpAIx4Hel6q/Pcgj/VhaQV5XPJ2I6ryIYbjnpc= modernc.org/z v1.0.1/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA= +mvdan.cc/gofumpt v0.1.0/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= +mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= +mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= +mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7/go.mod h1:hBpJkZE8H/sb+VRFvw2+rBpHNsTBcvSpk61hr8mzXZE= rsc.io/binaryregexp v0.2.0/go.mod 
h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno= sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= From 7c168220308a2b63ec9ea44ee4c7d4b64538b640 Mon Sep 17 00:00:00 2001 From: Bill Liu Date: Tue, 12 Oct 2021 03:35:21 +1300 Subject: [PATCH 67/81] docs: add external input plugin entry for opcda --- EXTERNAL_PLUGINS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/EXTERNAL_PLUGINS.md b/EXTERNAL_PLUGINS.md index fc71044d6172d..7e074d10f2e8d 100644 --- a/EXTERNAL_PLUGINS.md +++ b/EXTERNAL_PLUGINS.md @@ -8,6 +8,7 @@ Pull requests welcome. ## Inputs - [awsalarms](https://github.com/vipinvkmenon/awsalarms) - Simple plugin to gather/monitor alarms generated in AWS. 
- [octoprint](https://github.com/BattleBas/octoprint-telegraf-plugin) - Gather 3d print information from the octoprint API. +- [opcda](https://github.com/lpc921/telegraf-execd-opcda) - Gather data from [OPC Fundation's Data Access (DA)](https://opcfoundation.org/about/opc-technologies/opc-classic/) protocol for industrial automation. - [open-hardware-monitor](https://github.com/marianob85/open_hardware_monitor-telegraf-plugin) - Gather sensors data provided by [Open Hardware Monitor](http://openhardwaremonitor.org) - [plex](https://github.com/russorat/telegraf-webhooks-plex) - Listens for events from Plex Media Server [Webhooks](https://support.plex.tv/articles/115002267687-webhooks/). - [rand](https://github.com/ssoroka/rand) - Generate random numbers From 42fc07e3bb9e615e35f79d32675bfe722e6b3c73 Mon Sep 17 00:00:00 2001 From: MyaLongmire Date: Mon, 11 Oct 2021 11:01:33 -0600 Subject: [PATCH 68/81] fix: table test init test reworked --- plugins/inputs/snmp/snmp.go | 2 +- plugins/inputs/snmp/snmp_test.go | 13 +- plugins/inputs/snmp/testdata/tableinit | 2599 ++++++++++++++++++++++++ 3 files changed, 2608 insertions(+), 6 deletions(-) create mode 100644 plugins/inputs/snmp/testdata/tableinit diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index a7eec5ed09fdb..3af70cd7e54da 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -787,7 +787,6 @@ type snmpTableCache struct { func snmpTable(oid string) (mibName string, oidNum string, oidText string, fields []Field, err error) { var stc snmpTableCache stc.mibName, stc.oidNum, stc.oidText, stc.fields, stc.err = snmpTableCall(oid) - println(stc.fields) return stc.mibName, stc.oidNum, stc.oidText, stc.fields, stc.err } @@ -819,6 +818,7 @@ func snmpTableCall(oid string) (mibName string, oidNum string, oidText string, f // grabs all columns from the table // mimmicks grabbing everything returned from snmptable -Ch -Cl -c public 127.0.0.1 oidFullName col := 
node.GetRow().AsTable().ColumnOrder + for i := range col { _, isTag := tagOids[mibPrefix+col[i]] fields = append(fields, Field{Name: col[i], Oid: mibPrefix + col[i], IsTag: isTag}) diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index a261866b073db..dc765932749d2 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -148,9 +148,10 @@ func TestTableInit(t *testing.T) { require.NoError(t, err) tbl := Table{ - Oid: ".1.0.0.0", + Oid: ".1.3.6.1.2.1.3.1", Fields: []Field{ - {Oid: "TEST::description", Name: "description", IsTag: true}, + {Oid: ".1.3.6.1.2.1.3.1.1.1", Name: "atIfIndex"}, + {Oid: "RFC1213-MIB::atPhysAddress", Name: "atPhysAddress", IsTag: true}, }, } s := &Snmp{ @@ -161,10 +162,12 @@ func TestTableInit(t *testing.T) { err = tbl.Init(s) require.NoError(t, err) - assert.Equal(t, "testTable", tbl.Name) + assert.Equal(t, "atTable", tbl.Name) - assert.Len(t, tbl.Fields, 1) - assert.Contains(t, tbl.Fields, Field{Oid: ".1.0.0.0.1.4", Name: "description", IsTag: true, initialized: true, snmp: s}) + assert.Len(t, tbl.Fields, 4) + assert.Contains(t, tbl.Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.1", Name: "atIfIndex", initialized: true, snmp: s}) + assert.Contains(t, tbl.Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.2", Name: "atPhysAddress", IsTag: true, initialized: true, snmp: s, Conversion: "hwaddr"}) + assert.Contains(t, tbl.Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.3", Name: "atNetAddress", initialized: true, IsTag: true, snmp: s}) } func TestSnmpInit(t *testing.T) { diff --git a/plugins/inputs/snmp/testdata/tableinit b/plugins/inputs/snmp/testdata/tableinit new file mode 100644 index 0000000000000..10220adbcee04 --- /dev/null +++ b/plugins/inputs/snmp/testdata/tableinit @@ -0,0 +1,2599 @@ + RFC1213-MIB DEFINITIONS ::= BEGIN + + IMPORTS + mgmt, NetworkAddress, IpAddress, Counter, Gauge, + TimeTicks + FROM RFC1155-SMI + OBJECT-TYPE + FROM RFC-1212 + TEXTUAL-CONVENTION + FROM SNMPv2-TC + IANAifType + FROM 
IANAifType-MIB; + + -- This MIB module uses the extended OBJECT-TYPE macro as + -- defined in [14]; + + + -- MIB-II (same prefix as MIB-I) + + mib-2 OBJECT IDENTIFIER ::= { mgmt 1 } + + -- textual conventions + + DisplayString ::= + OCTET STRING + -- This data type is used to model textual information taken + -- from the NVT ASCII character set. By convention, objects + -- with this syntax are declared as having + -- + -- SIZE (0..255) + + PhysAddress ::= + OCTET STRING + -- This data type is used to model media addresses. For many + -- types of media, this will be in a binary representation. + -- For example, an ethernet address would be represented as + -- a string of 6 octets. + + + -- groups in MIB-II + + system OBJECT IDENTIFIER ::= { mib-2 1 } + + interfaces OBJECT IDENTIFIER ::= { mib-2 2 } + + at OBJECT IDENTIFIER ::= { mib-2 3 } + + ip OBJECT IDENTIFIER ::= { mib-2 4 } + + icmp OBJECT IDENTIFIER ::= { mib-2 5 } + + tcp OBJECT IDENTIFIER ::= { mib-2 6 } + + udp OBJECT IDENTIFIER ::= { mib-2 7 } + + egp OBJECT IDENTIFIER ::= { mib-2 8 } + + -- historical (some say hysterical) + -- cmot OBJECT IDENTIFIER ::= { mib-2 9 } + + transmission OBJECT IDENTIFIER ::= { mib-2 10 } + + snmp OBJECT IDENTIFIER ::= { mib-2 11 } + + + -- the System group + + -- Implementation of the System group is mandatory for all + -- systems. If an agent is not configured to have a value + -- for any of these variables, a string of length 0 is + -- returned. + + sysDescr OBJECT-TYPE + SYNTAX DisplayString (SIZE (0..255)) + ACCESS read-only + STATUS mandatory + DESCRIPTION + "A textual description of the entity. This value + should include the full name and version + identification of the system's hardware type, + software operating-system, and networking + software. It is mandatory that this only contain + printable ASCII characters." 
+ ::= { system 1 } + + sysObjectID OBJECT-TYPE + SYNTAX OBJECT IDENTIFIER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The vendor's authoritative identification of the + network management subsystem contained in the + entity. This value is allocated within the SMI + enterprises subtree (1.3.6.1.4.1) and provides an + easy and unambiguous means for determining `what + kind of box' is being managed. For example, if + vendor `Flintstones, Inc.' was assigned the + subtree 1.3.6.1.4.1.4242, it could assign the + identifier 1.3.6.1.4.1.4242.1.1 to its `Fred + Router'." + ::= { system 2 } + + sysUpTime OBJECT-TYPE + SYNTAX TimeTicks + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The time (in hundredths of a second) since the + network management portion of the system was last + re-initialized." + ::= { system 3 } + + sysContact OBJECT-TYPE + SYNTAX DisplayString (SIZE (0..255)) + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The textual identification of the contact person + for this managed node, together with information + on how to contact this person." + ::= { system 4 } + + sysName OBJECT-TYPE + SYNTAX DisplayString (SIZE (0..255)) + ACCESS read-write + STATUS mandatory + DESCRIPTION + "An administratively-assigned name for this + managed node. By convention, this is the node's + fully-qualified domain name." + ::= { system 5 } + + sysLocation OBJECT-TYPE + SYNTAX DisplayString (SIZE (0..255)) + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The physical location of this node (e.g., + `telephone closet, 3rd floor')." + ::= { system 6 } + + sysServices OBJECT-TYPE + SYNTAX INTEGER (0..127) + ACCESS read-only + STATUS mandatory + DESCRIPTION + "A value which indicates the set of services that + this entity primarily offers. + + The value is a sum. This sum initially takes the + value zero, Then, for each layer, L, in the range + 1 through 7, that this node performs transactions + for, 2 raised to (L - 1) is added to the sum. 
For + example, a node which performs primarily routing + functions would have a value of 4 (2^(3-1)). In + contrast, a node which is a host offering + application services would have a value of 72 + (2^(4-1) + 2^(7-1)). Note that in the context of + the Internet suite of protocols, values should be + calculated accordingly: + + layer functionality + 1 physical (e.g., repeaters) + 2 datalink/subnetwork (e.g., bridges) + 3 internet (e.g., IP gateways) + 4 end-to-end (e.g., IP hosts) + 7 applications (e.g., mail relays) + + For systems including OSI protocols, layers 5 and + 6 may also be counted." + ::= { system 7 } + + -- the Interfaces group + + -- Implementation of the Interfaces group is mandatory for + -- all systems. + + ifNumber OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of network interfaces (regardless of + their current state) present on this system." + ::= { interfaces 1 } + + + -- the Interfaces table + + -- The Interfaces table contains information on the entity's + -- interfaces. Each interface is thought of as being + -- attached to a `subnetwork'. Note that this term should + -- not be confused with `subnet' which refers to an + -- addressing partitioning scheme used in the Internet suite + -- of protocols. + + ifTable OBJECT-TYPE + SYNTAX SEQUENCE OF IfEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "A list of interface entries. The number of + entries is given by the value of ifNumber." + ::= { interfaces 2 } + + ifEntry OBJECT-TYPE + SYNTAX IfEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "An interface entry containing objects at the + subnetwork layer and below for a particular + interface." 
+ INDEX { ifIndex } + ::= { ifTable 1 } + + IfEntry ::= + SEQUENCE { + ifIndex + INTEGER, + ifDescr + DisplayString, + ifType + IANAifType, + ifMtu + INTEGER, + ifSpeed + Gauge, + ifPhysAddress + PhysAddress, + ifAdminStatus + INTEGER, + ifOperStatus + INTEGER, + ifLastChange + TimeTicks, + ifInOctets + Counter, + ifInUcastPkts + Counter, + ifInNUcastPkts + Counter, + ifInDiscards + Counter, + ifInErrors + Counter, + ifInUnknownProtos + Counter, + ifOutOctets + Counter, + ifOutUcastPkts + Counter, + ifOutNUcastPkts + Counter, + ifOutDiscards + Counter, + ifOutErrors + Counter, + ifOutQLen + Gauge, + ifSpecific + OBJECT IDENTIFIER + } + + ifIndex OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "A unique value for each interface. Its value + ranges between 1 and the value of ifNumber. The + value for each interface must remain constant at + least from one re-initialization of the entity's + network management system to the next re- + initialization." + ::= { ifEntry 1 } + + ifDescr OBJECT-TYPE + SYNTAX DisplayString (SIZE (0..255)) + ACCESS read-only + STATUS mandatory + DESCRIPTION + "A textual string containing information about the + interface. This string should include the name of + the manufacturer, the product name and the version + of the hardware interface." + ::= { ifEntry 2 } + + ifType OBJECT-TYPE + SYNTAX IANAifType + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The type of interface. Additional values for ifType + are assigned by the Internet Assigned Numbers + Authority (IANA), through updating the syntax of the + IANAifType textual convention." + ::= { ifEntry 3 } + + + ifMtu OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The size of the largest datagram which can be + sent/received on the interface, specified in + octets. 
For interfaces that are used for + transmitting network datagrams, this is the size + of the largest network datagram that can be sent + on the interface." + ::= { ifEntry 4 } + + ifSpeed OBJECT-TYPE + SYNTAX Gauge + ACCESS read-only + STATUS mandatory + DESCRIPTION + "An estimate of the interface's current bandwidth + in bits per second. For interfaces which do not + vary in bandwidth or for those where no accurate + estimation can be made, this object should contain + the nominal bandwidth." + ::= { ifEntry 5 } + + ifPhysAddress OBJECT-TYPE + SYNTAX PhysAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The interface's address at the protocol layer + immediately `below' the network layer in the + protocol stack. For interfaces which do not have + such an address (e.g., a serial line), this object + should contain an octet string of zero length." + ::= { ifEntry 6 } + + ifAdminStatus OBJECT-TYPE + SYNTAX INTEGER { + up(1), -- ready to pass packets + down(2), + testing(3) -- in some test mode + } + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The desired state of the interface. The + testing(3) state indicates that no operational + packets can be passed." + ::= { ifEntry 7 } + + ifOperStatus OBJECT-TYPE + SYNTAX INTEGER { + up(1), -- ready to pass packets + down(2), + testing(3), -- in some test mode + unknown(4), + dormant(5) + } + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The current operational state of the interface. + The testing(3) state indicates that no operational + packets can be passed." + ::= { ifEntry 8 } + + ifLastChange OBJECT-TYPE + SYNTAX TimeTicks + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The value of sysUpTime at the time the interface + entered its current operational state. If the + current state was entered prior to the last re- + initialization of the local network management + subsystem, then this object contains a zero + value." 
+ ::= { ifEntry 9 } + + ifInOctets OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of octets received on the + interface, including framing characters." + ::= { ifEntry 10 } + + ifInUcastPkts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of subnetwork-unicast packets + delivered to a higher-layer protocol." + ::= { ifEntry 11 } + + ifInNUcastPkts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of non-unicast (i.e., subnetwork- + broadcast or subnetwork-multicast) packets + delivered to a higher-layer protocol." + ::= { ifEntry 12 } + + ifInDiscards OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of inbound packets which were chosen + to be discarded even though no errors had been + detected to prevent their being deliverable to a + higher-layer protocol. One possible reason for + discarding such a packet could be to free up + buffer space." + ::= { ifEntry 13 } + + ifInErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of inbound packets that contained + errors preventing them from being deliverable to a + higher-layer protocol." + ::= { ifEntry 14 } + + ifInUnknownProtos OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of packets received via the interface + which were discarded because of an unknown or + unsupported protocol." + ::= { ifEntry 15 } + + ifOutOctets OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of octets transmitted out of the + interface, including framing characters." 
+ ::= { ifEntry 16 } + + ifOutUcastPkts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of packets that higher-level + protocols requested be transmitted to a + subnetwork-unicast address, including those that + were discarded or not sent." + ::= { ifEntry 17 } + + ifOutNUcastPkts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of packets that higher-level + protocols requested be transmitted to a non- + unicast (i.e., a subnetwork-broadcast or + subnetwork-multicast) address, including those + that were discarded or not sent." + ::= { ifEntry 18 } + + ifOutDiscards OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of outbound packets which were chosen + to be discarded even though no errors had been + detected to prevent their being transmitted. One + possible reason for discarding such a packet could + be to free up buffer space." + ::= { ifEntry 19 } + + ifOutErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of outbound packets that could not be + transmitted because of errors." + ::= { ifEntry 20 } + + ifOutQLen OBJECT-TYPE + SYNTAX Gauge + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The length of the output packet queue (in + packets)." + ::= { ifEntry 21 } + + ifSpecific OBJECT-TYPE + SYNTAX OBJECT IDENTIFIER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "A reference to MIB definitions specific to the + particular media being used to realize the + interface. For example, if the interface is + realized by an ethernet, then the value of this + object refers to a document defining objects + specific to ethernet. 
If this information is not + present, its value should be set to the OBJECT + IDENTIFIER { 0 0 }, which is a syntactically valid + object identifier, and any conformant + implementation of ASN.1 and BER must be able to + generate and recognize this value." + ::= { ifEntry 22 } + + + -- the Address Translation group + + -- Implementation of the Address Translation group is + -- mandatory for all systems. Note however that this group + -- is deprecated by MIB-II. That is, it is being included + -- solely for compatibility with MIB-I nodes, and will most + -- likely be excluded from MIB-III nodes. From MIB-II and + -- onwards, each network protocol group contains its own + -- address translation tables. + + -- The Address Translation group contains one table which is + -- the union across all interfaces of the translation tables + -- for converting a NetworkAddress (e.g., an IP address) into + -- a subnetwork-specific address. For lack of a better term, + -- this document refers to such a subnetwork-specific address + -- as a `physical' address. + + -- Examples of such translation tables are: for broadcast + -- media where ARP is in use, the translation table is + -- equivalent to the ARP cache; or, on an X.25 network where + -- non-algorithmic translation to X.121 addresses is + -- required, the translation table contains the + -- NetworkAddress to X.121 address equivalences. + + atTable OBJECT-TYPE + SYNTAX SEQUENCE OF AtEntry + ACCESS not-accessible + STATUS deprecated + DESCRIPTION + "The Address Translation tables contain the + NetworkAddress to `physical' address equivalences. + Some interfaces do not use translation tables for + determining address equivalences (e.g., DDN-X.25 + has an algorithmic method); if all interfaces are + of this type, then the Address Translation table + is empty, i.e., has zero entries." 
+ ::= { at 1 } + + atEntry OBJECT-TYPE + SYNTAX AtEntry + ACCESS not-accessible + STATUS deprecated + DESCRIPTION + "Each entry contains one NetworkAddress to + `physical' address equivalence." + INDEX { atIfIndex, + atNetAddress } + ::= { atTable 1 } + + AtEntry ::= + SEQUENCE { + atIfIndex + INTEGER, + atPhysAddress + PhysAddress, + atNetAddress + NetworkAddress + } + + atIfIndex OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS deprecated + DESCRIPTION + "The interface on which this entry's equivalence + is effective. The interface identified by a + particular value of this index is the same + interface as identified by the same value of + ifIndex." + ::= { atEntry 1 } + + atPhysAddress OBJECT-TYPE + SYNTAX PhysAddress + ACCESS read-write + STATUS deprecated + DESCRIPTION + "The media-dependent `physical' address. + + Setting this object to a null string (one of zero + length) has the effect of invaliding the + corresponding entry in the atTable object. That + is, it effectively disassociates the interface + identified with said entry from the mapping + identified with said entry. It is an + implementation-specific matter as to whether the + agent removes an invalidated entry from the table. + Accordingly, management stations must be prepared + to receive tabular information from agents that + corresponds to entries not currently in use. + Proper interpretation of such entries requires + examination of the relevant atPhysAddress object." + ::= { atEntry 2 } + + atNetAddress OBJECT-TYPE + SYNTAX NetworkAddress + ACCESS read-write + STATUS deprecated + DESCRIPTION + "The NetworkAddress (e.g., the IP address) + corresponding to the media-dependent `physical' + address." + ::= { atEntry 3 } + + + -- the IP group + + -- Implementation of the IP group is mandatory for all + -- systems. 
+ + ipForwarding OBJECT-TYPE + SYNTAX INTEGER { + forwarding(1), -- acting as a gateway + not-forwarding(2) -- NOT acting as a gateway + } + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The indication of whether this entity is acting + as an IP gateway in respect to the forwarding of + datagrams received by, but not addressed to, this + entity. IP gateways forward datagrams. IP hosts + do not (except those source-routed via the host). + + Note that for some managed nodes, this object may + take on only a subset of the values possible. + Accordingly, it is appropriate for an agent to + return a `badValue' response if a management + station attempts to change this object to an + inappropriate value." + ::= { ip 1 } + + ipDefaultTTL OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The default value inserted into the Time-To-Live + field of the IP header of datagrams originated at + this entity, whenever a TTL value is not supplied + by the transport layer protocol." + ::= { ip 2 } + + ipInReceives OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of input datagrams received from + interfaces, including those received in error." + ::= { ip 3 } + + ipInHdrErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of input datagrams discarded due to + errors in their IP headers, including bad + checksums, version number mismatch, other format + errors, time-to-live exceeded, errors discovered + in processing their IP options, etc." + ::= { ip 4 } + + ipInAddrErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of input datagrams discarded because + the IP address in their IP header's destination + field was not a valid address to be received at + this entity. This count includes invalid + addresses (e.g., 0.0.0.0) and addresses of + unsupported Classes (e.g., Class E). 
For entities + which are not IP Gateways and therefore do not + forward datagrams, this counter includes datagrams + discarded because the destination address was not + a local address." + ::= { ip 5 } + + ipForwDatagrams OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of input datagrams for which this + entity was not their final IP destination, as a + result of which an attempt was made to find a + route to forward them to that final destination. + In entities which do not act as IP Gateways, this + counter will include only those packets which were + Source-Routed via this entity, and the Source- + Route option processing was successful." + ::= { ip 6 } + + ipInUnknownProtos OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of locally-addressed datagrams + received successfully but discarded because of an + unknown or unsupported protocol." + ::= { ip 7 } + + ipInDiscards OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of input IP datagrams for which no + problems were encountered to prevent their + continued processing, but which were discarded + (e.g., for lack of buffer space). Note that this + counter does not include any datagrams discarded + while awaiting re-assembly." + ::= { ip 8 } + + ipInDelivers OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of input datagrams successfully + delivered to IP user-protocols (including ICMP)." + ::= { ip 9 } + + ipOutRequests OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of IP datagrams which local IP + user-protocols (including ICMP) supplied to IP in + requests for transmission. Note that this counter + does not include any datagrams counted in + ipForwDatagrams." 
+ ::= { ip 10 } + + ipOutDiscards OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of output IP datagrams for which no + problem was encountered to prevent their + transmission to their destination, but which were + discarded (e.g., for lack of buffer space). Note + that this counter would include datagrams counted + in ipForwDatagrams if any such packets met this + (discretionary) discard criterion." + ::= { ip 11 } + + ipOutNoRoutes OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of IP datagrams discarded because no + route could be found to transmit them to their + destination. Note that this counter includes any + packets counted in ipForwDatagrams which meet this + `no-route' criterion. Note that this includes any + datagrams which a host cannot route because all of + its default gateways are down." + ::= { ip 12 } + + ipReasmTimeout OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The maximum number of seconds which received + fragments are held while they are awaiting + reassembly at this entity." + ::= { ip 13 } + + ipReasmReqds OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of IP fragments received which needed + to be reassembled at this entity." + ::= { ip 14 } + + ipReasmOKs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of IP datagrams successfully re- + assembled." + ::= { ip 15 } + + ipReasmFails OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of failures detected by the IP re- + assembly algorithm (for whatever reason: timed + out, errors, etc). Note that this is not + necessarily a count of discarded IP fragments + since some algorithms (notably the algorithm in + RFC 815) can lose track of the number of fragments + by combining them as they are received." 
+ ::= { ip 16 } + + ipFragOKs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of IP datagrams that have been + successfully fragmented at this entity." + ::= { ip 17 } + + ipFragFails OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of IP datagrams that have been + discarded because they needed to be fragmented at + this entity but could not be, e.g., because their + Don't Fragment flag was set." + ::= { ip 18 } + + ipFragCreates OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of IP datagram fragments that have + been generated as a result of fragmentation at + this entity." + ::= { ip 19 } + + + + -- the IP address table + + -- The IP address table contains this entity's IP addressing + -- information. + + ipAddrTable OBJECT-TYPE + SYNTAX SEQUENCE OF IpAddrEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "The table of addressing information relevant to + this entity's IP addresses." + ::= { ip 20 } + + ipAddrEntry OBJECT-TYPE + SYNTAX IpAddrEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "The addressing information for one of this + entity's IP addresses." + INDEX { ipAdEntAddr } + ::= { ipAddrTable 1 } + + IpAddrEntry ::= + SEQUENCE { + ipAdEntAddr + IpAddress, + ipAdEntIfIndex + INTEGER, + ipAdEntNetMask + IpAddress, + ipAdEntBcastAddr + INTEGER, + ipAdEntReasmMaxSize + INTEGER (0..65535) + } + + ipAdEntAddr OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The IP address to which this entry's addressing + information pertains." + ::= { ipAddrEntry 1 } + + + ipAdEntIfIndex OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The index value which uniquely identifies the + interface to which this entry is applicable. 
The + interface identified by a particular value of this + index is the same interface as identified by the + same value of ifIndex." + ::= { ipAddrEntry 2 } + + ipAdEntNetMask OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The subnet mask associated with the IP address of + this entry. The value of the mask is an IP + address with all the network bits set to 1 and all + the hosts bits set to 0." + ::= { ipAddrEntry 3 } + + ipAdEntBcastAddr OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The value of the least-significant bit in the IP + broadcast address used for sending datagrams on + the (logical) interface associated with the IP + address of this entry. For example, when the + Internet standard all-ones broadcast address is + used, the value will be 1. This value applies to + both the subnet and network broadcasts addresses + used by the entity on this (logical) interface." + ::= { ipAddrEntry 4 } + + ipAdEntReasmMaxSize OBJECT-TYPE + SYNTAX INTEGER (0..65535) + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The size of the largest IP datagram which this + entity can re-assemble from incoming IP fragmented + datagrams received on this interface." + ::= { ipAddrEntry 5 } + + -- the IP routing table + + -- The IP routing table contains an entry for each route + -- presently known to this entity. + + ipRouteTable OBJECT-TYPE + SYNTAX SEQUENCE OF IpRouteEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "This entity's IP Routing table." + ::= { ip 21 } + + ipRouteEntry OBJECT-TYPE + SYNTAX IpRouteEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "A route to a particular destination." 
+ INDEX { ipRouteDest } + ::= { ipRouteTable 1 } + + IpRouteEntry ::= + SEQUENCE { + ipRouteDest + IpAddress, + ipRouteIfIndex + INTEGER, + ipRouteMetric1 + INTEGER, + ipRouteMetric2 + INTEGER, + ipRouteMetric3 + INTEGER, + ipRouteMetric4 + INTEGER, + ipRouteNextHop + IpAddress, + ipRouteType + INTEGER, + ipRouteProto + INTEGER, + ipRouteAge + INTEGER, + ipRouteMask + IpAddress, + ipRouteMetric5 + INTEGER, + ipRouteInfo + OBJECT IDENTIFIER + } + + ipRouteDest OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The destination IP address of this route. An + entry with a value of 0.0.0.0 is considered a + default route. Multiple routes to a single + destination can appear in the table, but access to + such multiple entries is dependent on the table- + access mechanisms defined by the network + management protocol in use." + ::= { ipRouteEntry 1 } + + ipRouteIfIndex OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The index value which uniquely identifies the + local interface through which the next hop of this + route should be reached. The interface identified + by a particular value of this index is the same + interface as identified by the same value of + ifIndex." + ::= { ipRouteEntry 2 } + + ipRouteMetric1 OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The primary routing metric for this route. The + semantics of this metric are determined by the + routing-protocol specified in the route's + ipRouteProto value. If this metric is not used, + its value should be set to -1." + ::= { ipRouteEntry 3 } + + ipRouteMetric2 OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "An alternate routing metric for this route. The + semantics of this metric are determined by the + routing-protocol specified in the route's + ipRouteProto value. If this metric is not used, + its value should be set to -1." 
+ ::= { ipRouteEntry 4 } + + ipRouteMetric3 OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "An alternate routing metric for this route. The + semantics of this metric are determined by the + routing-protocol specified in the route's + ipRouteProto value. If this metric is not used, + its value should be set to -1." + ::= { ipRouteEntry 5 } + + ipRouteMetric4 OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "An alternate routing metric for this route. The + semantics of this metric are determined by the + routing-protocol specified in the route's + ipRouteProto value. If this metric is not used, + its value should be set to -1." + ::= { ipRouteEntry 6 } + + ipRouteNextHop OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The IP address of the next hop of this route. + (In the case of a route bound to an interface + which is realized via a broadcast media, the value + of this field is the agent's IP address on that + interface.)" + ::= { ipRouteEntry 7 } + + ipRouteType OBJECT-TYPE + SYNTAX INTEGER { + other(1), -- none of the following + + invalid(2), -- an invalidated route + -- route to directly + direct(3), -- connected (sub-)network + + -- route to a non-local + indirect(4) -- host/network/sub-network + } + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The type of route. Note that the values + direct(3) and indirect(4) refer to the notion of + direct and indirect routing in the IP + architecture. + + Setting this object to the value invalid(2) has + the effect of invalidating the corresponding entry + in the ipRouteTable object. That is, it + effectively disassociates the destination + identified with said entry from the route + identified with said entry. It is an + implementation-specific matter as to whether the + agent removes an invalidated entry from the table. 
+ Accordingly, management stations must be prepared + to receive tabular information from agents that + corresponds to entries not currently in use. + Proper interpretation of such entries requires + examination of the relevant ipRouteType object." + ::= { ipRouteEntry 8 } + + ipRouteProto OBJECT-TYPE + SYNTAX INTEGER { + other(1), -- none of the following + + -- non-protocol information, + -- e.g., manually configured + local(2), -- entries + + -- set via a network + netmgmt(3), -- management protocol + + -- obtained via ICMP, + icmp(4), -- e.g., Redirect + + -- the remaining values are + -- all gateway routing + -- protocols + egp(5), + ggp(6), + hello(7), + rip(8), + is-is(9), + es-is(10), + ciscoIgrp(11), + bbnSpfIgp(12), + ospf(13), + bgp(14) + } + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The routing mechanism via which this route was + learned. Inclusion of values for gateway routing + protocols is not intended to imply that hosts + should support those protocols." + ::= { ipRouteEntry 9 } + + ipRouteAge OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The number of seconds since this route was last + updated or otherwise determined to be correct. + Note that no semantics of `too old' can be implied + except through knowledge of the routing protocol + by which the route was learned." + ::= { ipRouteEntry 10 } + + ipRouteMask OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-write + STATUS mandatory + DESCRIPTION + "Indicate the mask to be logical-ANDed with the + destination address before being compared to the + value in the ipRouteDest field. 
For those systems + that do not support arbitrary subnet masks, an + agent constructs the value of the ipRouteMask by + determining whether the value of the correspondent + ipRouteDest field belong to a class-A, B, or C + network, and then using one of: + + mask network + 255.0.0.0 class-A + 255.255.0.0 class-B + 255.255.255.0 class-C + + If the value of the ipRouteDest is 0.0.0.0 (a + default route), then the mask value is also + 0.0.0.0. It should be noted that all IP routing + subsystems implicitly use this mechanism." + ::= { ipRouteEntry 11 } + + ipRouteMetric5 OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "An alternate routing metric for this route. The + semantics of this metric are determined by the + routing-protocol specified in the route's + ipRouteProto value. If this metric is not used, + its value should be set to -1." + ::= { ipRouteEntry 12 } + + ipRouteInfo OBJECT-TYPE + SYNTAX OBJECT IDENTIFIER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "A reference to MIB definitions specific to the + particular routing protocol which is responsible + for this route, as determined by the value + specified in the route's ipRouteProto value. If + this information is not present, its value should + be set to the OBJECT IDENTIFIER { 0 0 }, which is + a syntactically valid object identifier, and any + conformant implementation of ASN.1 and BER must be + able to generate and recognize this value." + ::= { ipRouteEntry 13 } + + + -- the IP Address Translation table + + -- The IP address translation table contain the IpAddress to + -- `physical' address equivalences. Some interfaces do not + -- use translation tables for determining address + -- equivalences (e.g., DDN-X.25 has an algorithmic method); + -- if all interfaces are of this type, then the Address + -- Translation table is empty, i.e., has zero entries. 
+ + ipNetToMediaTable OBJECT-TYPE + SYNTAX SEQUENCE OF IpNetToMediaEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "The IP Address Translation table used for mapping + from IP addresses to physical addresses." + ::= { ip 22 } + + ipNetToMediaEntry OBJECT-TYPE + SYNTAX IpNetToMediaEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "Each entry contains one IpAddress to `physical' + address equivalence." + INDEX { ipNetToMediaIfIndex, + ipNetToMediaNetAddress } + ::= { ipNetToMediaTable 1 } + + IpNetToMediaEntry ::= + SEQUENCE { + ipNetToMediaIfIndex + INTEGER, + ipNetToMediaPhysAddress + PhysAddress, + ipNetToMediaNetAddress + IpAddress, + ipNetToMediaType + INTEGER + } + + ipNetToMediaIfIndex OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The interface on which this entry's equivalence + is effective. The interface identified by a + particular value of this index is the same + interface as identified by the same value of + ifIndex." + ::= { ipNetToMediaEntry 1 } + + ipNetToMediaPhysAddress OBJECT-TYPE + SYNTAX PhysAddress + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The media-dependent `physical' address." + ::= { ipNetToMediaEntry 2 } + + ipNetToMediaNetAddress OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The IpAddress corresponding to the media- + dependent `physical' address." + ::= { ipNetToMediaEntry 3 } + + ipNetToMediaType OBJECT-TYPE + SYNTAX INTEGER { + other(1), -- none of the following + invalid(2), -- an invalidated mapping + dynamic(3), + static(4) + } + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The type of mapping. + + Setting this object to the value invalid(2) has + the effect of invalidating the corresponding entry + in the ipNetToMediaTable. That is, it effectively + disassociates the interface identified with said + entry from the mapping identified with said entry. 
+ It is an implementation-specific matter as to + whether the agent removes an invalidated entry + from the table. Accordingly, management stations + must be prepared to receive tabular information + from agents that corresponds to entries not + currently in use. Proper interpretation of such + entries requires examination of the relevant + ipNetToMediaType object." + ::= { ipNetToMediaEntry 4 } + + + -- additional IP objects + + ipRoutingDiscards OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of routing entries which were chosen + to be discarded even though they are valid. One + possible reason for discarding such an entry could + be to free-up buffer space for other routing + entries." + ::= { ip 23 } + + + -- the ICMP group + + -- Implementation of the ICMP group is mandatory for all + -- systems. + + icmpInMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of ICMP messages which the + entity received. Note that this counter includes + all those counted by icmpInErrors." + ::= { icmp 1 } + + icmpInErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP messages which the entity + received but determined as having ICMP-specific + errors (bad ICMP checksums, bad length, etc.)." + ::= { icmp 2 } + + icmpInDestUnreachs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Destination Unreachable + messages received." + ::= { icmp 3 } + + icmpInTimeExcds OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Time Exceeded messages + received." + ::= { icmp 4 } + + + icmpInParmProbs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Parameter Problem messages + received." 
+ ::= { icmp 5 } + + icmpInSrcQuenchs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Source Quench messages + received." + ::= { icmp 6 } + + icmpInRedirects OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Redirect messages received." + ::= { icmp 7 } + + icmpInEchos OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Echo (request) messages + received." + ::= { icmp 8 } + + icmpInEchoReps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Echo Reply messages received." + ::= { icmp 9 } + + icmpInTimestamps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Timestamp (request) messages + received." + ::= { icmp 10 } + + icmpInTimestampReps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Timestamp Reply messages + received." + ::= { icmp 11 } + + icmpInAddrMasks OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Address Mask Request messages + received." + ::= { icmp 12 } + + icmpInAddrMaskReps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Address Mask Reply messages + received." + ::= { icmp 13 } + + icmpOutMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of ICMP messages which this + entity attempted to send. Note that this counter + includes all those counted by icmpOutErrors." + ::= { icmp 14 } + + icmpOutErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP messages which this entity did + not send due to problems discovered within ICMP + such as a lack of buffers. 
This value should not + include errors discovered outside the ICMP layer + such as the inability of IP to route the resultant + datagram. In some implementations there may be no + types of error which contribute to this counter's + value." + ::= { icmp 15 } + + icmpOutDestUnreachs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Destination Unreachable + messages sent." + ::= { icmp 16 } + + icmpOutTimeExcds OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Time Exceeded messages sent." + ::= { icmp 17 } + + icmpOutParmProbs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Parameter Problem messages + sent." + ::= { icmp 18 } + + icmpOutSrcQuenchs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Source Quench messages sent." + ::= { icmp 19 } + + icmpOutRedirects OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Redirect messages sent. For a + host, this object will always be zero, since hosts + do not send redirects." + ::= { icmp 20 } + + icmpOutEchos OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Echo (request) messages sent." + ::= { icmp 21 } + + icmpOutEchoReps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Echo Reply messages sent." + ::= { icmp 22 } + + icmpOutTimestamps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Timestamp (request) messages + sent." + ::= { icmp 23 } + + icmpOutTimestampReps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Timestamp Reply messages + sent." 
+ ::= { icmp 24 } + + icmpOutAddrMasks OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Address Mask Request messages + sent." + ::= { icmp 25 } + + + icmpOutAddrMaskReps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Address Mask Reply messages + sent." + ::= { icmp 26 } + + + -- the TCP group + + -- Implementation of the TCP group is mandatory for all + -- systems that implement the TCP. + + -- Note that instances of object types that represent + -- information about a particular TCP connection are + -- transient; they persist only as long as the connection + -- in question. + + tcpRtoAlgorithm OBJECT-TYPE + SYNTAX INTEGER { + other(1), -- none of the following + + constant(2), -- a constant rto + rsre(3), -- MIL-STD-1778, Appendix B + vanj(4) -- Van Jacobson's algorithm [10] + } + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The algorithm used to determine the timeout value + used for retransmitting unacknowledged octets." + ::= { tcp 1 } + + tcpRtoMin OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The minimum value permitted by a TCP + implementation for the retransmission timeout, + measured in milliseconds. More refined semantics + for objects of this type depend upon the algorithm + used to determine the retransmission timeout. In + particular, when the timeout algorithm is rsre(3), + an object of this type has the semantics of the + LBOUND quantity described in RFC 793." + ::= { tcp 2 } + + + tcpRtoMax OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The maximum value permitted by a TCP + implementation for the retransmission timeout, + measured in milliseconds. More refined semantics + for objects of this type depend upon the algorithm + used to determine the retransmission timeout. 
In + particular, when the timeout algorithm is rsre(3), + an object of this type has the semantics of the + UBOUND quantity described in RFC 793." + ::= { tcp 3 } + + tcpMaxConn OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The limit on the total number of TCP connections + the entity can support. In entities where the + maximum number of connections is dynamic, this + object should contain the value -1." + ::= { tcp 4 } + + tcpActiveOpens OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of times TCP connections have made a + direct transition to the SYN-SENT state from the + CLOSED state." + ::= { tcp 5 } + + tcpPassiveOpens OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of times TCP connections have made a + direct transition to the SYN-RCVD state from the + LISTEN state." + ::= { tcp 6 } + + tcpAttemptFails OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of times TCP connections have made a + direct transition to the CLOSED state from either + the SYN-SENT state or the SYN-RCVD state, plus the + number of times TCP connections have made a direct + transition to the LISTEN state from the SYN-RCVD + state." + ::= { tcp 7 } + + tcpEstabResets OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of times TCP connections have made a + direct transition to the CLOSED state from either + the ESTABLISHED state or the CLOSE-WAIT state." + ::= { tcp 8 } + + tcpCurrEstab OBJECT-TYPE + SYNTAX Gauge + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of TCP connections for which the + current state is either ESTABLISHED or CLOSE- + WAIT." + ::= { tcp 9 } + + tcpInSegs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of segments received, including + those received in error. 
This count includes + segments received on currently established + connections." + ::= { tcp 10 } + + tcpOutSegs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of segments sent, including + those on current connections but excluding those + containing only retransmitted octets." + ::= { tcp 11 } + + tcpRetransSegs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of segments retransmitted - that + is, the number of TCP segments transmitted + containing one or more previously transmitted + octets." + ::= { tcp 12 } + + + -- the TCP Connection table + + -- The TCP connection table contains information about this + -- entity's existing TCP connections. + + tcpConnTable OBJECT-TYPE + SYNTAX SEQUENCE OF TcpConnEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "A table containing TCP connection-specific + information." + ::= { tcp 13 } + + tcpConnEntry OBJECT-TYPE + SYNTAX TcpConnEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "Information about a particular current TCP + connection. An object of this type is transient, + in that it ceases to exist when (or soon after) + the connection makes the transition to the CLOSED + state." + INDEX { tcpConnLocalAddress, + tcpConnLocalPort, + tcpConnRemAddress, + tcpConnRemPort } + ::= { tcpConnTable 1 } + + TcpConnEntry ::= + SEQUENCE { + tcpConnState + INTEGER, + tcpConnLocalAddress + IpAddress, + tcpConnLocalPort + INTEGER (0..65535), + tcpConnRemAddress + IpAddress, + tcpConnRemPort + INTEGER (0..65535) + } + + tcpConnState OBJECT-TYPE + SYNTAX INTEGER { + closed(1), + listen(2), + synSent(3), + synReceived(4), + established(5), + finWait1(6), + finWait2(7), + closeWait(8), + lastAck(9), + closing(10), + timeWait(11), + deleteTCB(12) + } + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The state of this TCP connection. 
+ + The only value which may be set by a management + station is deleteTCB(12). Accordingly, it is + appropriate for an agent to return a `badValue' + response if a management station attempts to set + this object to any other value. + + If a management station sets this object to the + value deleteTCB(12), then this has the effect of + deleting the TCB (as defined in RFC 793) of the + corresponding connection on the managed node, + resulting in immediate termination of the + connection. + + As an implementation-specific option, a RST + segment may be sent from the managed node to the + other TCP endpoint (note however that RST segments + are not sent reliably)." + ::= { tcpConnEntry 1 } + + tcpConnLocalAddress OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The local IP address for this TCP connection. In + the case of a connection in the listen state which + is willing to accept connections for any IP + interface associated with the node, the value + 0.0.0.0 is used." + ::= { tcpConnEntry 2 } + + tcpConnLocalPort OBJECT-TYPE + SYNTAX INTEGER (0..65535) + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The local port number for this TCP connection." + ::= { tcpConnEntry 3 } + + tcpConnRemAddress OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The remote IP address for this TCP connection." + ::= { tcpConnEntry 4 } + + tcpConnRemPort OBJECT-TYPE + SYNTAX INTEGER (0..65535) + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The remote port number for this TCP connection." + ::= { tcpConnEntry 5 } + + + -- additional TCP objects + + tcpInErrs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of segments received in error + (e.g., bad TCP checksums)." + ::= { tcp 14 } + + tcpOutRsts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of TCP segments sent containing the + RST flag." 
+ ::= { tcp 15 } + + + -- the UDP group + + -- Implementation of the UDP group is mandatory for all + -- systems which implement the UDP. + + udpInDatagrams OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of UDP datagrams delivered to + UDP users." + ::= { udp 1 } + + udpNoPorts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of received UDP datagrams for + which there was no application at the destination + port." + ::= { udp 2 } + + udpInErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of received UDP datagrams that could + not be delivered for reasons other than the lack + of an application at the destination port." + ::= { udp 3 } + + udpOutDatagrams OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of UDP datagrams sent from this + entity." + ::= { udp 4 } + + + -- the UDP Listener table + + -- The UDP listener table contains information about this + -- entity's UDP end-points on which a local application is + -- currently accepting datagrams. + + udpTable OBJECT-TYPE + SYNTAX SEQUENCE OF UdpEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "A table containing UDP listener information." + ::= { udp 5 } + + udpEntry OBJECT-TYPE + SYNTAX UdpEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "Information about a particular current UDP + listener." + INDEX { udpLocalAddress, udpLocalPort } + ::= { udpTable 1 } + + UdpEntry ::= + SEQUENCE { + udpLocalAddress + IpAddress, + udpLocalPort + INTEGER (0..65535) + } + + udpLocalAddress OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The local IP address for this UDP listener. In + the case of a UDP listener which is willing to + accept datagrams for any IP interface associated + with the node, the value 0.0.0.0 is used." 
+ ::= { udpEntry 1 } + + udpLocalPort OBJECT-TYPE + SYNTAX INTEGER (0..65535) + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The local port number for this UDP listener." + ::= { udpEntry 2 } + + + -- the EGP group + + -- Implementation of the EGP group is mandatory for all + -- systems which implement the EGP. + + egpInMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP messages received without + error." + ::= { egp 1 } + + egpInErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP messages received that proved + to be in error." + ::= { egp 2 } + + egpOutMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of locally generated EGP + messages." + ::= { egp 3 } + + egpOutErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of locally generated EGP messages not + sent due to resource limitations within an EGP + entity." + ::= { egp 4 } + + + -- the EGP Neighbor table + + -- The EGP neighbor table contains information about this + -- entity's EGP neighbors. + + egpNeighTable OBJECT-TYPE + SYNTAX SEQUENCE OF EgpNeighEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "The EGP neighbor table." + ::= { egp 5 } + + egpNeighEntry OBJECT-TYPE + SYNTAX EgpNeighEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "Information about this entity's relationship with + a particular EGP neighbor." 
+ INDEX { egpNeighAddr } + ::= { egpNeighTable 1 } + + EgpNeighEntry ::= + SEQUENCE { + egpNeighState + INTEGER, + egpNeighAddr + IpAddress, + egpNeighAs + INTEGER, + egpNeighInMsgs + Counter, + egpNeighInErrs + Counter, + egpNeighOutMsgs + Counter, + egpNeighOutErrs + Counter, + egpNeighInErrMsgs + Counter, + egpNeighOutErrMsgs + Counter, + egpNeighStateUps + Counter, + egpNeighStateDowns + Counter, + egpNeighIntervalHello + INTEGER, + egpNeighIntervalPoll + INTEGER, + egpNeighMode + INTEGER, + egpNeighEventTrigger + INTEGER + } + + egpNeighState OBJECT-TYPE + SYNTAX INTEGER { + idle(1), + acquisition(2), + down(3), + up(4), + cease(5) + } + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The EGP state of the local system with respect to + this entry's EGP neighbor. Each EGP state is + represented by a value that is one greater than + the numerical value associated with said state in + RFC 904." + ::= { egpNeighEntry 1 } + + egpNeighAddr OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The IP address of this entry's EGP neighbor." + ::= { egpNeighEntry 2 } + + egpNeighAs OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The autonomous system of this EGP peer. Zero + should be specified if the autonomous system + number of the neighbor is not yet known." + ::= { egpNeighEntry 3 } + + egpNeighInMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP messages received without error + from this EGP peer." + ::= { egpNeighEntry 4 } + + egpNeighInErrs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP messages received from this EGP + peer that proved to be in error (e.g., bad EGP + checksum)." + ::= { egpNeighEntry 5 } + + egpNeighOutMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of locally generated EGP messages to + this EGP peer." 
+ ::= { egpNeighEntry 6 } + + egpNeighOutErrs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of locally generated EGP messages not + sent to this EGP peer due to resource limitations + within an EGP entity." + ::= { egpNeighEntry 7 } + + egpNeighInErrMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP-defined error messages received + from this EGP peer." + ::= { egpNeighEntry 8 } + + egpNeighOutErrMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP-defined error messages sent to + this EGP peer." + ::= { egpNeighEntry 9 } + + egpNeighStateUps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP state transitions to the UP + state with this EGP peer." + ::= { egpNeighEntry 10 } + + egpNeighStateDowns OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP state transitions from the UP + state to any other state with this EGP peer." + ::= { egpNeighEntry 11 } + + egpNeighIntervalHello OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The interval between EGP Hello command + retransmissions (in hundredths of a second). This + represents the t1 timer as defined in RFC 904." + ::= { egpNeighEntry 12 } + + egpNeighIntervalPoll OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The interval between EGP poll command + retransmissions (in hundredths of a second). This + represents the t3 timer as defined in RFC 904." + ::= { egpNeighEntry 13 } + + egpNeighMode OBJECT-TYPE + SYNTAX INTEGER { active(1), passive(2) } + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The polling mode of this EGP entity, either + passive or active." 
+ ::= { egpNeighEntry 14 } + + egpNeighEventTrigger OBJECT-TYPE + SYNTAX INTEGER { start(1), stop(2) } + ACCESS read-write + STATUS mandatory + DESCRIPTION + "A control variable used to trigger operator- + initiated Start and Stop events. When read, this + variable always returns the most recent value that + egpNeighEventTrigger was set to. If it has not + been set since the last initialization of the + network management subsystem on the node, it + returns a value of `stop'. + + When set, this variable causes a Start or Stop + event on the specified neighbor, as specified on + pages 8-10 of RFC 904. Briefly, a Start event + causes an Idle peer to begin neighbor acquisition + and a non-Idle peer to reinitiate neighbor + acquisition. A stop event causes a non-Idle peer + to return to the Idle state until a Start event + occurs, either via egpNeighEventTrigger or + otherwise." + ::= { egpNeighEntry 15 } + + + -- additional EGP objects + + egpAs OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The autonomous system number of this EGP entity." + ::= { egp 6 } + + -- the Transmission group + + -- Based on the transmission media underlying each interface + -- on a system, the corresponding portion of the Transmission + -- group is mandatory for that system. + + -- When Internet-standard definitions for managing + -- transmission media are defined, the transmission group is + -- used to provide a prefix for the names of those objects. + + -- Typically, such definitions reside in the experimental + -- portion of the MIB until they are "proven", then as a + -- part of the Internet standardization process, the + -- definitions are accordingly elevated and a new object + -- identifier, under the transmission group is defined. 
By + -- convention, the name assigned is: + -- + -- type OBJECT IDENTIFIER ::= { transmission number } + -- + -- where "type" is the symbolic value used for the media in + -- the ifType column of the ifTable object, and "number" is + -- the actual integer value corresponding to the symbol. + + + -- the SNMP group + + -- Implementation of the SNMP group is mandatory for all + -- systems which support an SNMP protocol entity. Some of + -- the objects defined below will be zero-valued in those + -- SNMP implementations that are optimized to support only + -- those functions specific to either a management agent or + -- a management station. In particular, it should be + -- observed that the objects below refer to an SNMP entity, + -- and there may be several SNMP entities residing on a + -- managed node (e.g., if the node is hosting acting as + -- a management station). + + snmpInPkts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of Messages delivered to the + SNMP entity from the transport service." + ::= { snmp 1 } + + snmpOutPkts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Messages which were + passed from the SNMP protocol entity to the + transport service." + ::= { snmp 2 } + + snmpInBadVersions OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Messages which were + delivered to the SNMP protocol entity and were for + an unsupported SNMP version." + ::= { snmp 3 } + + snmpInBadCommunityNames OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Messages delivered to + the SNMP protocol entity which used a SNMP + community name not known to said entity." 
+ ::= { snmp 4 } + + snmpInBadCommunityUses OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Messages delivered to + the SNMP protocol entity which represented an SNMP + operation which was not allowed by the SNMP + community named in the Message." + ::= { snmp 5 } + + snmpInASNParseErrs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of ASN.1 or BER errors + encountered by the SNMP protocol entity when + decoding received SNMP Messages." + ::= { snmp 6 } + + -- { snmp 7 } is not used + + snmpInTooBigs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP PDUs which were + delivered to the SNMP protocol entity and for + which the value of the error-status field is + `tooBig'." + ::= { snmp 8 } + + snmpInNoSuchNames OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP PDUs which were + delivered to the SNMP protocol entity and for + which the value of the error-status field is + `noSuchName'." + ::= { snmp 9 } + + snmpInBadValues OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP PDUs which were + delivered to the SNMP protocol entity and for + which the value of the error-status field is + `badValue'." + ::= { snmp 10 } + + snmpInReadOnlys OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number valid SNMP PDUs which were + delivered to the SNMP protocol entity and for + which the value of the error-status field is + `readOnly'. It should be noted that it is a + protocol error to generate an SNMP PDU which + contains the value `readOnly' in the error-status + field, as such this object is provided as a means + of detecting incorrect implementations of the + SNMP." 
+ ::= { snmp 11 } + + snmpInGenErrs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP PDUs which were + delivered to the SNMP protocol entity and for + which the value of the error-status field is + `genErr'." + ::= { snmp 12 } + + snmpInTotalReqVars OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of MIB objects which have been + retrieved successfully by the SNMP protocol entity + as the result of receiving valid SNMP Get-Request + and Get-Next PDUs." + ::= { snmp 13 } + + snmpInTotalSetVars OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of MIB objects which have been + altered successfully by the SNMP protocol entity + as the result of receiving valid SNMP Set-Request + PDUs." + ::= { snmp 14 } + + snmpInGetRequests OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Get-Request PDUs which + have been accepted and processed by the SNMP + protocol entity." + ::= { snmp 15 } + + snmpInGetNexts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Get-Next PDUs which have + been accepted and processed by the SNMP protocol + entity." + ::= { snmp 16 } + + snmpInSetRequests OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Set-Request PDUs which + have been accepted and processed by the SNMP + protocol entity." + ::= { snmp 17 } + + snmpInGetResponses OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Get-Response PDUs which + have been accepted and processed by the SNMP + protocol entity." 
+ ::= { snmp 18 } + + snmpInTraps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Trap PDUs which have + been accepted and processed by the SNMP protocol + entity." + ::= { snmp 19 } + + snmpOutTooBigs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP PDUs which were + generated by the SNMP protocol entity and for + which the value of the error-status field is + `tooBig.'" + ::= { snmp 20 } + + snmpOutNoSuchNames OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP PDUs which were + generated by the SNMP protocol entity and for + which the value of the error-status is + `noSuchName'." + ::= { snmp 21 } + + snmpOutBadValues OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP PDUs which were + generated by the SNMP protocol entity and for + which the value of the error-status field is + `badValue'." + ::= { snmp 22 } + + -- { snmp 23 } is not used + + snmpOutGenErrs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP PDUs which were + generated by the SNMP protocol entity and for + which the value of the error-status field is + `genErr'." + ::= { snmp 24 } + + snmpOutGetRequests OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Get-Request PDUs which + have been generated by the SNMP protocol entity." + ::= { snmp 25 } + + snmpOutGetNexts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Get-Next PDUs which have + been generated by the SNMP protocol entity." 
+ ::= { snmp 26 } + + snmpOutSetRequests OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Set-Request PDUs which + have been generated by the SNMP protocol entity." + ::= { snmp 27 } + + snmpOutGetResponses OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Get-Response PDUs which + have been generated by the SNMP protocol entity." + ::= { snmp 28 } + + snmpOutTraps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Trap PDUs which have + been generated by the SNMP protocol entity." + ::= { snmp 29 } + + snmpEnableAuthenTraps OBJECT-TYPE + SYNTAX INTEGER { enabled(1), disabled(2) } + ACCESS read-write + STATUS mandatory + DESCRIPTION + "Indicates whether the SNMP agent process is + permitted to generate authentication-failure + traps. The value of this object overrides any + configuration information; as such, it provides a + means whereby all authentication-failure traps may + be disabled. + + Note that it is strongly recommended that this + object be stored in non-volatile memory so that it + remains constant between re-initializations of the + network management system." 
+ ::= { snmp 30 } + +END From 19dbc2956120335ee669125aa6797acd883e8ffe Mon Sep 17 00:00:00 2001 From: MyaLongmire Date: Mon, 11 Oct 2021 12:28:57 -0600 Subject: [PATCH 69/81] fix: added more mibs for tests --- plugins/inputs/snmp/snmp_test.go | 2 +- plugins/inputs/snmp/testdata/tableinit | 2599 ---------------- .../inputs/snmp/testdata/tabletest/RFC1155 | 119 + .../inputs/snmp/testdata/tabletest/RFC1213 | 2613 +++++++++++++++++ plugins/inputs/snmp/testdata/test.mib | 97 - 5 files changed, 2733 insertions(+), 2697 deletions(-) delete mode 100644 plugins/inputs/snmp/testdata/tableinit create mode 100644 plugins/inputs/snmp/testdata/tabletest/RFC1155 create mode 100644 plugins/inputs/snmp/testdata/tabletest/RFC1213 delete mode 100644 plugins/inputs/snmp/testdata/test.mib diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index 2b7c4d1e64386..1e7f4d00867cf 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -153,7 +153,7 @@ func TestFieldInit(t *testing.T) { } func TestTableInit(t *testing.T) { - testDataPath, err := filepath.Abs("./testdata") + testDataPath, err := filepath.Abs("./testdata/tabletest") require.NoError(t, err) tbl := Table{ diff --git a/plugins/inputs/snmp/testdata/tableinit b/plugins/inputs/snmp/testdata/tableinit deleted file mode 100644 index 10220adbcee04..0000000000000 --- a/plugins/inputs/snmp/testdata/tableinit +++ /dev/null @@ -1,2599 +0,0 @@ - RFC1213-MIB DEFINITIONS ::= BEGIN - - IMPORTS - mgmt, NetworkAddress, IpAddress, Counter, Gauge, - TimeTicks - FROM RFC1155-SMI - OBJECT-TYPE - FROM RFC-1212 - TEXTUAL-CONVENTION - FROM SNMPv2-TC - IANAifType - FROM IANAifType-MIB; - - -- This MIB module uses the extended OBJECT-TYPE macro as - -- defined in [14]; - - - -- MIB-II (same prefix as MIB-I) - - mib-2 OBJECT IDENTIFIER ::= { mgmt 1 } - - -- textual conventions - - DisplayString ::= - OCTET STRING - -- This data type is used to model textual information taken - -- from the NVT 
ASCII character set. By convention, objects - -- with this syntax are declared as having - -- - -- SIZE (0..255) - - PhysAddress ::= - OCTET STRING - -- This data type is used to model media addresses. For many - -- types of media, this will be in a binary representation. - -- For example, an ethernet address would be represented as - -- a string of 6 octets. - - - -- groups in MIB-II - - system OBJECT IDENTIFIER ::= { mib-2 1 } - - interfaces OBJECT IDENTIFIER ::= { mib-2 2 } - - at OBJECT IDENTIFIER ::= { mib-2 3 } - - ip OBJECT IDENTIFIER ::= { mib-2 4 } - - icmp OBJECT IDENTIFIER ::= { mib-2 5 } - - tcp OBJECT IDENTIFIER ::= { mib-2 6 } - - udp OBJECT IDENTIFIER ::= { mib-2 7 } - - egp OBJECT IDENTIFIER ::= { mib-2 8 } - - -- historical (some say hysterical) - -- cmot OBJECT IDENTIFIER ::= { mib-2 9 } - - transmission OBJECT IDENTIFIER ::= { mib-2 10 } - - snmp OBJECT IDENTIFIER ::= { mib-2 11 } - - - -- the System group - - -- Implementation of the System group is mandatory for all - -- systems. If an agent is not configured to have a value - -- for any of these variables, a string of length 0 is - -- returned. - - sysDescr OBJECT-TYPE - SYNTAX DisplayString (SIZE (0..255)) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "A textual description of the entity. This value - should include the full name and version - identification of the system's hardware type, - software operating-system, and networking - software. It is mandatory that this only contain - printable ASCII characters." - ::= { system 1 } - - sysObjectID OBJECT-TYPE - SYNTAX OBJECT IDENTIFIER - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The vendor's authoritative identification of the - network management subsystem contained in the - entity. This value is allocated within the SMI - enterprises subtree (1.3.6.1.4.1) and provides an - easy and unambiguous means for determining `what - kind of box' is being managed. For example, if - vendor `Flintstones, Inc.' 
was assigned the - subtree 1.3.6.1.4.1.4242, it could assign the - identifier 1.3.6.1.4.1.4242.1.1 to its `Fred - Router'." - ::= { system 2 } - - sysUpTime OBJECT-TYPE - SYNTAX TimeTicks - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The time (in hundredths of a second) since the - network management portion of the system was last - re-initialized." - ::= { system 3 } - - sysContact OBJECT-TYPE - SYNTAX DisplayString (SIZE (0..255)) - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The textual identification of the contact person - for this managed node, together with information - on how to contact this person." - ::= { system 4 } - - sysName OBJECT-TYPE - SYNTAX DisplayString (SIZE (0..255)) - ACCESS read-write - STATUS mandatory - DESCRIPTION - "An administratively-assigned name for this - managed node. By convention, this is the node's - fully-qualified domain name." - ::= { system 5 } - - sysLocation OBJECT-TYPE - SYNTAX DisplayString (SIZE (0..255)) - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The physical location of this node (e.g., - `telephone closet, 3rd floor')." - ::= { system 6 } - - sysServices OBJECT-TYPE - SYNTAX INTEGER (0..127) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "A value which indicates the set of services that - this entity primarily offers. - - The value is a sum. This sum initially takes the - value zero, Then, for each layer, L, in the range - 1 through 7, that this node performs transactions - for, 2 raised to (L - 1) is added to the sum. For - example, a node which performs primarily routing - functions would have a value of 4 (2^(3-1)). In - contrast, a node which is a host offering - application services would have a value of 72 - (2^(4-1) + 2^(7-1)). 
Note that in the context of - the Internet suite of protocols, values should be - calculated accordingly: - - layer functionality - 1 physical (e.g., repeaters) - 2 datalink/subnetwork (e.g., bridges) - 3 internet (e.g., IP gateways) - 4 end-to-end (e.g., IP hosts) - 7 applications (e.g., mail relays) - - For systems including OSI protocols, layers 5 and - 6 may also be counted." - ::= { system 7 } - - -- the Interfaces group - - -- Implementation of the Interfaces group is mandatory for - -- all systems. - - ifNumber OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of network interfaces (regardless of - their current state) present on this system." - ::= { interfaces 1 } - - - -- the Interfaces table - - -- The Interfaces table contains information on the entity's - -- interfaces. Each interface is thought of as being - -- attached to a `subnetwork'. Note that this term should - -- not be confused with `subnet' which refers to an - -- addressing partitioning scheme used in the Internet suite - -- of protocols. - - ifTable OBJECT-TYPE - SYNTAX SEQUENCE OF IfEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A list of interface entries. The number of - entries is given by the value of ifNumber." - ::= { interfaces 2 } - - ifEntry OBJECT-TYPE - SYNTAX IfEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "An interface entry containing objects at the - subnetwork layer and below for a particular - interface." 
- INDEX { ifIndex } - ::= { ifTable 1 } - - IfEntry ::= - SEQUENCE { - ifIndex - INTEGER, - ifDescr - DisplayString, - ifType - IANAifType, - ifMtu - INTEGER, - ifSpeed - Gauge, - ifPhysAddress - PhysAddress, - ifAdminStatus - INTEGER, - ifOperStatus - INTEGER, - ifLastChange - TimeTicks, - ifInOctets - Counter, - ifInUcastPkts - Counter, - ifInNUcastPkts - Counter, - ifInDiscards - Counter, - ifInErrors - Counter, - ifInUnknownProtos - Counter, - ifOutOctets - Counter, - ifOutUcastPkts - Counter, - ifOutNUcastPkts - Counter, - ifOutDiscards - Counter, - ifOutErrors - Counter, - ifOutQLen - Gauge, - ifSpecific - OBJECT IDENTIFIER - } - - ifIndex OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-only - STATUS mandatory - DESCRIPTION - "A unique value for each interface. Its value - ranges between 1 and the value of ifNumber. The - value for each interface must remain constant at - least from one re-initialization of the entity's - network management system to the next re- - initialization." - ::= { ifEntry 1 } - - ifDescr OBJECT-TYPE - SYNTAX DisplayString (SIZE (0..255)) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "A textual string containing information about the - interface. This string should include the name of - the manufacturer, the product name and the version - of the hardware interface." - ::= { ifEntry 2 } - - ifType OBJECT-TYPE - SYNTAX IANAifType - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The type of interface. Additional values for ifType - are assigned by the Internet Assigned Numbers - Authority (IANA), through updating the syntax of the - IANAifType textual convention." - ::= { ifEntry 3 } - - - ifMtu OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The size of the largest datagram which can be - sent/received on the interface, specified in - octets. 
For interfaces that are used for - transmitting network datagrams, this is the size - of the largest network datagram that can be sent - on the interface." - ::= { ifEntry 4 } - - ifSpeed OBJECT-TYPE - SYNTAX Gauge - ACCESS read-only - STATUS mandatory - DESCRIPTION - "An estimate of the interface's current bandwidth - in bits per second. For interfaces which do not - vary in bandwidth or for those where no accurate - estimation can be made, this object should contain - the nominal bandwidth." - ::= { ifEntry 5 } - - ifPhysAddress OBJECT-TYPE - SYNTAX PhysAddress - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The interface's address at the protocol layer - immediately `below' the network layer in the - protocol stack. For interfaces which do not have - such an address (e.g., a serial line), this object - should contain an octet string of zero length." - ::= { ifEntry 6 } - - ifAdminStatus OBJECT-TYPE - SYNTAX INTEGER { - up(1), -- ready to pass packets - down(2), - testing(3) -- in some test mode - } - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The desired state of the interface. The - testing(3) state indicates that no operational - packets can be passed." - ::= { ifEntry 7 } - - ifOperStatus OBJECT-TYPE - SYNTAX INTEGER { - up(1), -- ready to pass packets - down(2), - testing(3), -- in some test mode - unknown(4), - dormant(5) - } - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The current operational state of the interface. - The testing(3) state indicates that no operational - packets can be passed." - ::= { ifEntry 8 } - - ifLastChange OBJECT-TYPE - SYNTAX TimeTicks - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The value of sysUpTime at the time the interface - entered its current operational state. If the - current state was entered prior to the last re- - initialization of the local network management - subsystem, then this object contains a zero - value." 
- ::= { ifEntry 9 } - - ifInOctets OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of octets received on the - interface, including framing characters." - ::= { ifEntry 10 } - - ifInUcastPkts OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of subnetwork-unicast packets - delivered to a higher-layer protocol." - ::= { ifEntry 11 } - - ifInNUcastPkts OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of non-unicast (i.e., subnetwork- - broadcast or subnetwork-multicast) packets - delivered to a higher-layer protocol." - ::= { ifEntry 12 } - - ifInDiscards OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of inbound packets which were chosen - to be discarded even though no errors had been - detected to prevent their being deliverable to a - higher-layer protocol. One possible reason for - discarding such a packet could be to free up - buffer space." - ::= { ifEntry 13 } - - ifInErrors OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of inbound packets that contained - errors preventing them from being deliverable to a - higher-layer protocol." - ::= { ifEntry 14 } - - ifInUnknownProtos OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of packets received via the interface - which were discarded because of an unknown or - unsupported protocol." - ::= { ifEntry 15 } - - ifOutOctets OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of octets transmitted out of the - interface, including framing characters." 
- ::= { ifEntry 16 } - - ifOutUcastPkts OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of packets that higher-level - protocols requested be transmitted to a - subnetwork-unicast address, including those that - were discarded or not sent." - ::= { ifEntry 17 } - - ifOutNUcastPkts OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of packets that higher-level - protocols requested be transmitted to a non- - unicast (i.e., a subnetwork-broadcast or - subnetwork-multicast) address, including those - that were discarded or not sent." - ::= { ifEntry 18 } - - ifOutDiscards OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of outbound packets which were chosen - to be discarded even though no errors had been - detected to prevent their being transmitted. One - possible reason for discarding such a packet could - be to free up buffer space." - ::= { ifEntry 19 } - - ifOutErrors OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of outbound packets that could not be - transmitted because of errors." - ::= { ifEntry 20 } - - ifOutQLen OBJECT-TYPE - SYNTAX Gauge - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The length of the output packet queue (in - packets)." - ::= { ifEntry 21 } - - ifSpecific OBJECT-TYPE - SYNTAX OBJECT IDENTIFIER - ACCESS read-only - STATUS mandatory - DESCRIPTION - "A reference to MIB definitions specific to the - particular media being used to realize the - interface. For example, if the interface is - realized by an ethernet, then the value of this - object refers to a document defining objects - specific to ethernet. 
If this information is not - present, its value should be set to the OBJECT - IDENTIFIER { 0 0 }, which is a syntactically valid - object identifier, and any conformant - implementation of ASN.1 and BER must be able to - generate and recognize this value." - ::= { ifEntry 22 } - - - -- the Address Translation group - - -- Implementation of the Address Translation group is - -- mandatory for all systems. Note however that this group - -- is deprecated by MIB-II. That is, it is being included - -- solely for compatibility with MIB-I nodes, and will most - -- likely be excluded from MIB-III nodes. From MIB-II and - -- onwards, each network protocol group contains its own - -- address translation tables. - - -- The Address Translation group contains one table which is - -- the union across all interfaces of the translation tables - -- for converting a NetworkAddress (e.g., an IP address) into - -- a subnetwork-specific address. For lack of a better term, - -- this document refers to such a subnetwork-specific address - -- as a `physical' address. - - -- Examples of such translation tables are: for broadcast - -- media where ARP is in use, the translation table is - -- equivalent to the ARP cache; or, on an X.25 network where - -- non-algorithmic translation to X.121 addresses is - -- required, the translation table contains the - -- NetworkAddress to X.121 address equivalences. - - atTable OBJECT-TYPE - SYNTAX SEQUENCE OF AtEntry - ACCESS not-accessible - STATUS deprecated - DESCRIPTION - "The Address Translation tables contain the - NetworkAddress to `physical' address equivalences. - Some interfaces do not use translation tables for - determining address equivalences (e.g., DDN-X.25 - has an algorithmic method); if all interfaces are - of this type, then the Address Translation table - is empty, i.e., has zero entries." 
- ::= { at 1 } - - atEntry OBJECT-TYPE - SYNTAX AtEntry - ACCESS not-accessible - STATUS deprecated - DESCRIPTION - "Each entry contains one NetworkAddress to - `physical' address equivalence." - INDEX { atIfIndex, - atNetAddress } - ::= { atTable 1 } - - AtEntry ::= - SEQUENCE { - atIfIndex - INTEGER, - atPhysAddress - PhysAddress, - atNetAddress - NetworkAddress - } - - atIfIndex OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-write - STATUS deprecated - DESCRIPTION - "The interface on which this entry's equivalence - is effective. The interface identified by a - particular value of this index is the same - interface as identified by the same value of - ifIndex." - ::= { atEntry 1 } - - atPhysAddress OBJECT-TYPE - SYNTAX PhysAddress - ACCESS read-write - STATUS deprecated - DESCRIPTION - "The media-dependent `physical' address. - - Setting this object to a null string (one of zero - length) has the effect of invaliding the - corresponding entry in the atTable object. That - is, it effectively disassociates the interface - identified with said entry from the mapping - identified with said entry. It is an - implementation-specific matter as to whether the - agent removes an invalidated entry from the table. - Accordingly, management stations must be prepared - to receive tabular information from agents that - corresponds to entries not currently in use. - Proper interpretation of such entries requires - examination of the relevant atPhysAddress object." - ::= { atEntry 2 } - - atNetAddress OBJECT-TYPE - SYNTAX NetworkAddress - ACCESS read-write - STATUS deprecated - DESCRIPTION - "The NetworkAddress (e.g., the IP address) - corresponding to the media-dependent `physical' - address." - ::= { atEntry 3 } - - - -- the IP group - - -- Implementation of the IP group is mandatory for all - -- systems. 
- - ipForwarding OBJECT-TYPE - SYNTAX INTEGER { - forwarding(1), -- acting as a gateway - not-forwarding(2) -- NOT acting as a gateway - } - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The indication of whether this entity is acting - as an IP gateway in respect to the forwarding of - datagrams received by, but not addressed to, this - entity. IP gateways forward datagrams. IP hosts - do not (except those source-routed via the host). - - Note that for some managed nodes, this object may - take on only a subset of the values possible. - Accordingly, it is appropriate for an agent to - return a `badValue' response if a management - station attempts to change this object to an - inappropriate value." - ::= { ip 1 } - - ipDefaultTTL OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The default value inserted into the Time-To-Live - field of the IP header of datagrams originated at - this entity, whenever a TTL value is not supplied - by the transport layer protocol." - ::= { ip 2 } - - ipInReceives OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of input datagrams received from - interfaces, including those received in error." - ::= { ip 3 } - - ipInHdrErrors OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of input datagrams discarded due to - errors in their IP headers, including bad - checksums, version number mismatch, other format - errors, time-to-live exceeded, errors discovered - in processing their IP options, etc." - ::= { ip 4 } - - ipInAddrErrors OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of input datagrams discarded because - the IP address in their IP header's destination - field was not a valid address to be received at - this entity. This count includes invalid - addresses (e.g., 0.0.0.0) and addresses of - unsupported Classes (e.g., Class E). 
For entities - which are not IP Gateways and therefore do not - forward datagrams, this counter includes datagrams - discarded because the destination address was not - a local address." - ::= { ip 5 } - - ipForwDatagrams OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of input datagrams for which this - entity was not their final IP destination, as a - result of which an attempt was made to find a - route to forward them to that final destination. - In entities which do not act as IP Gateways, this - counter will include only those packets which were - Source-Routed via this entity, and the Source- - Route option processing was successful." - ::= { ip 6 } - - ipInUnknownProtos OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of locally-addressed datagrams - received successfully but discarded because of an - unknown or unsupported protocol." - ::= { ip 7 } - - ipInDiscards OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of input IP datagrams for which no - problems were encountered to prevent their - continued processing, but which were discarded - (e.g., for lack of buffer space). Note that this - counter does not include any datagrams discarded - while awaiting re-assembly." - ::= { ip 8 } - - ipInDelivers OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of input datagrams successfully - delivered to IP user-protocols (including ICMP)." - ::= { ip 9 } - - ipOutRequests OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of IP datagrams which local IP - user-protocols (including ICMP) supplied to IP in - requests for transmission. Note that this counter - does not include any datagrams counted in - ipForwDatagrams." 
- ::= { ip 10 } - - ipOutDiscards OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of output IP datagrams for which no - problem was encountered to prevent their - transmission to their destination, but which were - discarded (e.g., for lack of buffer space). Note - that this counter would include datagrams counted - in ipForwDatagrams if any such packets met this - (discretionary) discard criterion." - ::= { ip 11 } - - ipOutNoRoutes OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of IP datagrams discarded because no - route could be found to transmit them to their - destination. Note that this counter includes any - packets counted in ipForwDatagrams which meet this - `no-route' criterion. Note that this includes any - datagrams which a host cannot route because all of - its default gateways are down." - ::= { ip 12 } - - ipReasmTimeout OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The maximum number of seconds which received - fragments are held while they are awaiting - reassembly at this entity." - ::= { ip 13 } - - ipReasmReqds OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of IP fragments received which needed - to be reassembled at this entity." - ::= { ip 14 } - - ipReasmOKs OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of IP datagrams successfully re- - assembled." - ::= { ip 15 } - - ipReasmFails OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of failures detected by the IP re- - assembly algorithm (for whatever reason: timed - out, errors, etc). Note that this is not - necessarily a count of discarded IP fragments - since some algorithms (notably the algorithm in - RFC 815) can lose track of the number of fragments - by combining them as they are received." 
- ::= { ip 16 } - - ipFragOKs OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of IP datagrams that have been - successfully fragmented at this entity." - ::= { ip 17 } - - ipFragFails OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of IP datagrams that have been - discarded because they needed to be fragmented at - this entity but could not be, e.g., because their - Don't Fragment flag was set." - ::= { ip 18 } - - ipFragCreates OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of IP datagram fragments that have - been generated as a result of fragmentation at - this entity." - ::= { ip 19 } - - - - -- the IP address table - - -- The IP address table contains this entity's IP addressing - -- information. - - ipAddrTable OBJECT-TYPE - SYNTAX SEQUENCE OF IpAddrEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "The table of addressing information relevant to - this entity's IP addresses." - ::= { ip 20 } - - ipAddrEntry OBJECT-TYPE - SYNTAX IpAddrEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "The addressing information for one of this - entity's IP addresses." - INDEX { ipAdEntAddr } - ::= { ipAddrTable 1 } - - IpAddrEntry ::= - SEQUENCE { - ipAdEntAddr - IpAddress, - ipAdEntIfIndex - INTEGER, - ipAdEntNetMask - IpAddress, - ipAdEntBcastAddr - INTEGER, - ipAdEntReasmMaxSize - INTEGER (0..65535) - } - - ipAdEntAddr OBJECT-TYPE - SYNTAX IpAddress - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The IP address to which this entry's addressing - information pertains." - ::= { ipAddrEntry 1 } - - - ipAdEntIfIndex OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The index value which uniquely identifies the - interface to which this entry is applicable. 
The - interface identified by a particular value of this - index is the same interface as identified by the - same value of ifIndex." - ::= { ipAddrEntry 2 } - - ipAdEntNetMask OBJECT-TYPE - SYNTAX IpAddress - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The subnet mask associated with the IP address of - this entry. The value of the mask is an IP - address with all the network bits set to 1 and all - the hosts bits set to 0." - ::= { ipAddrEntry 3 } - - ipAdEntBcastAddr OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The value of the least-significant bit in the IP - broadcast address used for sending datagrams on - the (logical) interface associated with the IP - address of this entry. For example, when the - Internet standard all-ones broadcast address is - used, the value will be 1. This value applies to - both the subnet and network broadcasts addresses - used by the entity on this (logical) interface." - ::= { ipAddrEntry 4 } - - ipAdEntReasmMaxSize OBJECT-TYPE - SYNTAX INTEGER (0..65535) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The size of the largest IP datagram which this - entity can re-assemble from incoming IP fragmented - datagrams received on this interface." - ::= { ipAddrEntry 5 } - - -- the IP routing table - - -- The IP routing table contains an entry for each route - -- presently known to this entity. - - ipRouteTable OBJECT-TYPE - SYNTAX SEQUENCE OF IpRouteEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "This entity's IP Routing table." - ::= { ip 21 } - - ipRouteEntry OBJECT-TYPE - SYNTAX IpRouteEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A route to a particular destination." 
- INDEX { ipRouteDest } - ::= { ipRouteTable 1 } - - IpRouteEntry ::= - SEQUENCE { - ipRouteDest - IpAddress, - ipRouteIfIndex - INTEGER, - ipRouteMetric1 - INTEGER, - ipRouteMetric2 - INTEGER, - ipRouteMetric3 - INTEGER, - ipRouteMetric4 - INTEGER, - ipRouteNextHop - IpAddress, - ipRouteType - INTEGER, - ipRouteProto - INTEGER, - ipRouteAge - INTEGER, - ipRouteMask - IpAddress, - ipRouteMetric5 - INTEGER, - ipRouteInfo - OBJECT IDENTIFIER - } - - ipRouteDest OBJECT-TYPE - SYNTAX IpAddress - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The destination IP address of this route. An - entry with a value of 0.0.0.0 is considered a - default route. Multiple routes to a single - destination can appear in the table, but access to - such multiple entries is dependent on the table- - access mechanisms defined by the network - management protocol in use." - ::= { ipRouteEntry 1 } - - ipRouteIfIndex OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The index value which uniquely identifies the - local interface through which the next hop of this - route should be reached. The interface identified - by a particular value of this index is the same - interface as identified by the same value of - ifIndex." - ::= { ipRouteEntry 2 } - - ipRouteMetric1 OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The primary routing metric for this route. The - semantics of this metric are determined by the - routing-protocol specified in the route's - ipRouteProto value. If this metric is not used, - its value should be set to -1." - ::= { ipRouteEntry 3 } - - ipRouteMetric2 OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-write - STATUS mandatory - DESCRIPTION - "An alternate routing metric for this route. The - semantics of this metric are determined by the - routing-protocol specified in the route's - ipRouteProto value. If this metric is not used, - its value should be set to -1." 
- ::= { ipRouteEntry 4 } - - ipRouteMetric3 OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-write - STATUS mandatory - DESCRIPTION - "An alternate routing metric for this route. The - semantics of this metric are determined by the - routing-protocol specified in the route's - ipRouteProto value. If this metric is not used, - its value should be set to -1." - ::= { ipRouteEntry 5 } - - ipRouteMetric4 OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-write - STATUS mandatory - DESCRIPTION - "An alternate routing metric for this route. The - semantics of this metric are determined by the - routing-protocol specified in the route's - ipRouteProto value. If this metric is not used, - its value should be set to -1." - ::= { ipRouteEntry 6 } - - ipRouteNextHop OBJECT-TYPE - SYNTAX IpAddress - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The IP address of the next hop of this route. - (In the case of a route bound to an interface - which is realized via a broadcast media, the value - of this field is the agent's IP address on that - interface.)" - ::= { ipRouteEntry 7 } - - ipRouteType OBJECT-TYPE - SYNTAX INTEGER { - other(1), -- none of the following - - invalid(2), -- an invalidated route - -- route to directly - direct(3), -- connected (sub-)network - - -- route to a non-local - indirect(4) -- host/network/sub-network - } - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The type of route. Note that the values - direct(3) and indirect(4) refer to the notion of - direct and indirect routing in the IP - architecture. - - Setting this object to the value invalid(2) has - the effect of invalidating the corresponding entry - in the ipRouteTable object. That is, it - effectively disassociates the destination - identified with said entry from the route - identified with said entry. It is an - implementation-specific matter as to whether the - agent removes an invalidated entry from the table. 
- Accordingly, management stations must be prepared - to receive tabular information from agents that - corresponds to entries not currently in use. - Proper interpretation of such entries requires - examination of the relevant ipRouteType object." - ::= { ipRouteEntry 8 } - - ipRouteProto OBJECT-TYPE - SYNTAX INTEGER { - other(1), -- none of the following - - -- non-protocol information, - -- e.g., manually configured - local(2), -- entries - - -- set via a network - netmgmt(3), -- management protocol - - -- obtained via ICMP, - icmp(4), -- e.g., Redirect - - -- the remaining values are - -- all gateway routing - -- protocols - egp(5), - ggp(6), - hello(7), - rip(8), - is-is(9), - es-is(10), - ciscoIgrp(11), - bbnSpfIgp(12), - ospf(13), - bgp(14) - } - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The routing mechanism via which this route was - learned. Inclusion of values for gateway routing - protocols is not intended to imply that hosts - should support those protocols." - ::= { ipRouteEntry 9 } - - ipRouteAge OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The number of seconds since this route was last - updated or otherwise determined to be correct. - Note that no semantics of `too old' can be implied - except through knowledge of the routing protocol - by which the route was learned." - ::= { ipRouteEntry 10 } - - ipRouteMask OBJECT-TYPE - SYNTAX IpAddress - ACCESS read-write - STATUS mandatory - DESCRIPTION - "Indicate the mask to be logical-ANDed with the - destination address before being compared to the - value in the ipRouteDest field. 
For those systems - that do not support arbitrary subnet masks, an - agent constructs the value of the ipRouteMask by - determining whether the value of the correspondent - ipRouteDest field belong to a class-A, B, or C - network, and then using one of: - - mask network - 255.0.0.0 class-A - 255.255.0.0 class-B - 255.255.255.0 class-C - - If the value of the ipRouteDest is 0.0.0.0 (a - default route), then the mask value is also - 0.0.0.0. It should be noted that all IP routing - subsystems implicitly use this mechanism." - ::= { ipRouteEntry 11 } - - ipRouteMetric5 OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-write - STATUS mandatory - DESCRIPTION - "An alternate routing metric for this route. The - semantics of this metric are determined by the - routing-protocol specified in the route's - ipRouteProto value. If this metric is not used, - its value should be set to -1." - ::= { ipRouteEntry 12 } - - ipRouteInfo OBJECT-TYPE - SYNTAX OBJECT IDENTIFIER - ACCESS read-only - STATUS mandatory - DESCRIPTION - "A reference to MIB definitions specific to the - particular routing protocol which is responsible - for this route, as determined by the value - specified in the route's ipRouteProto value. If - this information is not present, its value should - be set to the OBJECT IDENTIFIER { 0 0 }, which is - a syntactically valid object identifier, and any - conformant implementation of ASN.1 and BER must be - able to generate and recognize this value." - ::= { ipRouteEntry 13 } - - - -- the IP Address Translation table - - -- The IP address translation table contain the IpAddress to - -- `physical' address equivalences. Some interfaces do not - -- use translation tables for determining address - -- equivalences (e.g., DDN-X.25 has an algorithmic method); - -- if all interfaces are of this type, then the Address - -- Translation table is empty, i.e., has zero entries. 
- - ipNetToMediaTable OBJECT-TYPE - SYNTAX SEQUENCE OF IpNetToMediaEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "The IP Address Translation table used for mapping - from IP addresses to physical addresses." - ::= { ip 22 } - - ipNetToMediaEntry OBJECT-TYPE - SYNTAX IpNetToMediaEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "Each entry contains one IpAddress to `physical' - address equivalence." - INDEX { ipNetToMediaIfIndex, - ipNetToMediaNetAddress } - ::= { ipNetToMediaTable 1 } - - IpNetToMediaEntry ::= - SEQUENCE { - ipNetToMediaIfIndex - INTEGER, - ipNetToMediaPhysAddress - PhysAddress, - ipNetToMediaNetAddress - IpAddress, - ipNetToMediaType - INTEGER - } - - ipNetToMediaIfIndex OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The interface on which this entry's equivalence - is effective. The interface identified by a - particular value of this index is the same - interface as identified by the same value of - ifIndex." - ::= { ipNetToMediaEntry 1 } - - ipNetToMediaPhysAddress OBJECT-TYPE - SYNTAX PhysAddress - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The media-dependent `physical' address." - ::= { ipNetToMediaEntry 2 } - - ipNetToMediaNetAddress OBJECT-TYPE - SYNTAX IpAddress - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The IpAddress corresponding to the media- - dependent `physical' address." - ::= { ipNetToMediaEntry 3 } - - ipNetToMediaType OBJECT-TYPE - SYNTAX INTEGER { - other(1), -- none of the following - invalid(2), -- an invalidated mapping - dynamic(3), - static(4) - } - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The type of mapping. - - Setting this object to the value invalid(2) has - the effect of invalidating the corresponding entry - in the ipNetToMediaTable. That is, it effectively - disassociates the interface identified with said - entry from the mapping identified with said entry. 
- It is an implementation-specific matter as to - whether the agent removes an invalidated entry - from the table. Accordingly, management stations - must be prepared to receive tabular information - from agents that corresponds to entries not - currently in use. Proper interpretation of such - entries requires examination of the relevant - ipNetToMediaType object." - ::= { ipNetToMediaEntry 4 } - - - -- additional IP objects - - ipRoutingDiscards OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of routing entries which were chosen - to be discarded even though they are valid. One - possible reason for discarding such an entry could - be to free-up buffer space for other routing - entries." - ::= { ip 23 } - - - -- the ICMP group - - -- Implementation of the ICMP group is mandatory for all - -- systems. - - icmpInMsgs OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of ICMP messages which the - entity received. Note that this counter includes - all those counted by icmpInErrors." - ::= { icmp 1 } - - icmpInErrors OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of ICMP messages which the entity - received but determined as having ICMP-specific - errors (bad ICMP checksums, bad length, etc.)." - ::= { icmp 2 } - - icmpInDestUnreachs OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of ICMP Destination Unreachable - messages received." - ::= { icmp 3 } - - icmpInTimeExcds OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of ICMP Time Exceeded messages - received." - ::= { icmp 4 } - - - icmpInParmProbs OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of ICMP Parameter Problem messages - received." 
- ::= { icmp 5 } - - icmpInSrcQuenchs OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of ICMP Source Quench messages - received." - ::= { icmp 6 } - - icmpInRedirects OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of ICMP Redirect messages received." - ::= { icmp 7 } - - icmpInEchos OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of ICMP Echo (request) messages - received." - ::= { icmp 8 } - - icmpInEchoReps OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of ICMP Echo Reply messages received." - ::= { icmp 9 } - - icmpInTimestamps OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of ICMP Timestamp (request) messages - received." - ::= { icmp 10 } - - icmpInTimestampReps OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of ICMP Timestamp Reply messages - received." - ::= { icmp 11 } - - icmpInAddrMasks OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of ICMP Address Mask Request messages - received." - ::= { icmp 12 } - - icmpInAddrMaskReps OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of ICMP Address Mask Reply messages - received." - ::= { icmp 13 } - - icmpOutMsgs OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of ICMP messages which this - entity attempted to send. Note that this counter - includes all those counted by icmpOutErrors." - ::= { icmp 14 } - - icmpOutErrors OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of ICMP messages which this entity did - not send due to problems discovered within ICMP - such as a lack of buffers. 
This value should not - include errors discovered outside the ICMP layer - such as the inability of IP to route the resultant - datagram. In some implementations there may be no - types of error which contribute to this counter's - value." - ::= { icmp 15 } - - icmpOutDestUnreachs OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of ICMP Destination Unreachable - messages sent." - ::= { icmp 16 } - - icmpOutTimeExcds OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of ICMP Time Exceeded messages sent." - ::= { icmp 17 } - - icmpOutParmProbs OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of ICMP Parameter Problem messages - sent." - ::= { icmp 18 } - - icmpOutSrcQuenchs OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of ICMP Source Quench messages sent." - ::= { icmp 19 } - - icmpOutRedirects OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of ICMP Redirect messages sent. For a - host, this object will always be zero, since hosts - do not send redirects." - ::= { icmp 20 } - - icmpOutEchos OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of ICMP Echo (request) messages sent." - ::= { icmp 21 } - - icmpOutEchoReps OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of ICMP Echo Reply messages sent." - ::= { icmp 22 } - - icmpOutTimestamps OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of ICMP Timestamp (request) messages - sent." - ::= { icmp 23 } - - icmpOutTimestampReps OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of ICMP Timestamp Reply messages - sent." 
- ::= { icmp 24 } - - icmpOutAddrMasks OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of ICMP Address Mask Request messages - sent." - ::= { icmp 25 } - - - icmpOutAddrMaskReps OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of ICMP Address Mask Reply messages - sent." - ::= { icmp 26 } - - - -- the TCP group - - -- Implementation of the TCP group is mandatory for all - -- systems that implement the TCP. - - -- Note that instances of object types that represent - -- information about a particular TCP connection are - -- transient; they persist only as long as the connection - -- in question. - - tcpRtoAlgorithm OBJECT-TYPE - SYNTAX INTEGER { - other(1), -- none of the following - - constant(2), -- a constant rto - rsre(3), -- MIL-STD-1778, Appendix B - vanj(4) -- Van Jacobson's algorithm [10] - } - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The algorithm used to determine the timeout value - used for retransmitting unacknowledged octets." - ::= { tcp 1 } - - tcpRtoMin OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The minimum value permitted by a TCP - implementation for the retransmission timeout, - measured in milliseconds. More refined semantics - for objects of this type depend upon the algorithm - used to determine the retransmission timeout. In - particular, when the timeout algorithm is rsre(3), - an object of this type has the semantics of the - LBOUND quantity described in RFC 793." - ::= { tcp 2 } - - - tcpRtoMax OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The maximum value permitted by a TCP - implementation for the retransmission timeout, - measured in milliseconds. More refined semantics - for objects of this type depend upon the algorithm - used to determine the retransmission timeout. 
In - particular, when the timeout algorithm is rsre(3), - an object of this type has the semantics of the - UBOUND quantity described in RFC 793." - ::= { tcp 3 } - - tcpMaxConn OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The limit on the total number of TCP connections - the entity can support. In entities where the - maximum number of connections is dynamic, this - object should contain the value -1." - ::= { tcp 4 } - - tcpActiveOpens OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of times TCP connections have made a - direct transition to the SYN-SENT state from the - CLOSED state." - ::= { tcp 5 } - - tcpPassiveOpens OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of times TCP connections have made a - direct transition to the SYN-RCVD state from the - LISTEN state." - ::= { tcp 6 } - - tcpAttemptFails OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of times TCP connections have made a - direct transition to the CLOSED state from either - the SYN-SENT state or the SYN-RCVD state, plus the - number of times TCP connections have made a direct - transition to the LISTEN state from the SYN-RCVD - state." - ::= { tcp 7 } - - tcpEstabResets OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of times TCP connections have made a - direct transition to the CLOSED state from either - the ESTABLISHED state or the CLOSE-WAIT state." - ::= { tcp 8 } - - tcpCurrEstab OBJECT-TYPE - SYNTAX Gauge - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of TCP connections for which the - current state is either ESTABLISHED or CLOSE- - WAIT." - ::= { tcp 9 } - - tcpInSegs OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of segments received, including - those received in error. 
This count includes - segments received on currently established - connections." - ::= { tcp 10 } - - tcpOutSegs OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of segments sent, including - those on current connections but excluding those - containing only retransmitted octets." - ::= { tcp 11 } - - tcpRetransSegs OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of segments retransmitted - that - is, the number of TCP segments transmitted - containing one or more previously transmitted - octets." - ::= { tcp 12 } - - - -- the TCP Connection table - - -- The TCP connection table contains information about this - -- entity's existing TCP connections. - - tcpConnTable OBJECT-TYPE - SYNTAX SEQUENCE OF TcpConnEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A table containing TCP connection-specific - information." - ::= { tcp 13 } - - tcpConnEntry OBJECT-TYPE - SYNTAX TcpConnEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "Information about a particular current TCP - connection. An object of this type is transient, - in that it ceases to exist when (or soon after) - the connection makes the transition to the CLOSED - state." - INDEX { tcpConnLocalAddress, - tcpConnLocalPort, - tcpConnRemAddress, - tcpConnRemPort } - ::= { tcpConnTable 1 } - - TcpConnEntry ::= - SEQUENCE { - tcpConnState - INTEGER, - tcpConnLocalAddress - IpAddress, - tcpConnLocalPort - INTEGER (0..65535), - tcpConnRemAddress - IpAddress, - tcpConnRemPort - INTEGER (0..65535) - } - - tcpConnState OBJECT-TYPE - SYNTAX INTEGER { - closed(1), - listen(2), - synSent(3), - synReceived(4), - established(5), - finWait1(6), - finWait2(7), - closeWait(8), - lastAck(9), - closing(10), - timeWait(11), - deleteTCB(12) - } - ACCESS read-write - STATUS mandatory - DESCRIPTION - "The state of this TCP connection. 
- - The only value which may be set by a management - station is deleteTCB(12). Accordingly, it is - appropriate for an agent to return a `badValue' - response if a management station attempts to set - this object to any other value. - - If a management station sets this object to the - value deleteTCB(12), then this has the effect of - deleting the TCB (as defined in RFC 793) of the - corresponding connection on the managed node, - resulting in immediate termination of the - connection. - - As an implementation-specific option, a RST - segment may be sent from the managed node to the - other TCP endpoint (note however that RST segments - are not sent reliably)." - ::= { tcpConnEntry 1 } - - tcpConnLocalAddress OBJECT-TYPE - SYNTAX IpAddress - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The local IP address for this TCP connection. In - the case of a connection in the listen state which - is willing to accept connections for any IP - interface associated with the node, the value - 0.0.0.0 is used." - ::= { tcpConnEntry 2 } - - tcpConnLocalPort OBJECT-TYPE - SYNTAX INTEGER (0..65535) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The local port number for this TCP connection." - ::= { tcpConnEntry 3 } - - tcpConnRemAddress OBJECT-TYPE - SYNTAX IpAddress - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The remote IP address for this TCP connection." - ::= { tcpConnEntry 4 } - - tcpConnRemPort OBJECT-TYPE - SYNTAX INTEGER (0..65535) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The remote port number for this TCP connection." - ::= { tcpConnEntry 5 } - - - -- additional TCP objects - - tcpInErrs OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of segments received in error - (e.g., bad TCP checksums)." - ::= { tcp 14 } - - tcpOutRsts OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of TCP segments sent containing the - RST flag." 
- ::= { tcp 15 } - - - -- the UDP group - - -- Implementation of the UDP group is mandatory for all - -- systems which implement the UDP. - - udpInDatagrams OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of UDP datagrams delivered to - UDP users." - ::= { udp 1 } - - udpNoPorts OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of received UDP datagrams for - which there was no application at the destination - port." - ::= { udp 2 } - - udpInErrors OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of received UDP datagrams that could - not be delivered for reasons other than the lack - of an application at the destination port." - ::= { udp 3 } - - udpOutDatagrams OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of UDP datagrams sent from this - entity." - ::= { udp 4 } - - - -- the UDP Listener table - - -- The UDP listener table contains information about this - -- entity's UDP end-points on which a local application is - -- currently accepting datagrams. - - udpTable OBJECT-TYPE - SYNTAX SEQUENCE OF UdpEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "A table containing UDP listener information." - ::= { udp 5 } - - udpEntry OBJECT-TYPE - SYNTAX UdpEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "Information about a particular current UDP - listener." - INDEX { udpLocalAddress, udpLocalPort } - ::= { udpTable 1 } - - UdpEntry ::= - SEQUENCE { - udpLocalAddress - IpAddress, - udpLocalPort - INTEGER (0..65535) - } - - udpLocalAddress OBJECT-TYPE - SYNTAX IpAddress - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The local IP address for this UDP listener. In - the case of a UDP listener which is willing to - accept datagrams for any IP interface associated - with the node, the value 0.0.0.0 is used." 
- ::= { udpEntry 1 } - - udpLocalPort OBJECT-TYPE - SYNTAX INTEGER (0..65535) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The local port number for this UDP listener." - ::= { udpEntry 2 } - - - -- the EGP group - - -- Implementation of the EGP group is mandatory for all - -- systems which implement the EGP. - - egpInMsgs OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of EGP messages received without - error." - ::= { egp 1 } - - egpInErrors OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of EGP messages received that proved - to be in error." - ::= { egp 2 } - - egpOutMsgs OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of locally generated EGP - messages." - ::= { egp 3 } - - egpOutErrors OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of locally generated EGP messages not - sent due to resource limitations within an EGP - entity." - ::= { egp 4 } - - - -- the EGP Neighbor table - - -- The EGP neighbor table contains information about this - -- entity's EGP neighbors. - - egpNeighTable OBJECT-TYPE - SYNTAX SEQUENCE OF EgpNeighEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "The EGP neighbor table." - ::= { egp 5 } - - egpNeighEntry OBJECT-TYPE - SYNTAX EgpNeighEntry - ACCESS not-accessible - STATUS mandatory - DESCRIPTION - "Information about this entity's relationship with - a particular EGP neighbor." 
- INDEX { egpNeighAddr } - ::= { egpNeighTable 1 } - - EgpNeighEntry ::= - SEQUENCE { - egpNeighState - INTEGER, - egpNeighAddr - IpAddress, - egpNeighAs - INTEGER, - egpNeighInMsgs - Counter, - egpNeighInErrs - Counter, - egpNeighOutMsgs - Counter, - egpNeighOutErrs - Counter, - egpNeighInErrMsgs - Counter, - egpNeighOutErrMsgs - Counter, - egpNeighStateUps - Counter, - egpNeighStateDowns - Counter, - egpNeighIntervalHello - INTEGER, - egpNeighIntervalPoll - INTEGER, - egpNeighMode - INTEGER, - egpNeighEventTrigger - INTEGER - } - - egpNeighState OBJECT-TYPE - SYNTAX INTEGER { - idle(1), - acquisition(2), - down(3), - up(4), - cease(5) - } - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The EGP state of the local system with respect to - this entry's EGP neighbor. Each EGP state is - represented by a value that is one greater than - the numerical value associated with said state in - RFC 904." - ::= { egpNeighEntry 1 } - - egpNeighAddr OBJECT-TYPE - SYNTAX IpAddress - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The IP address of this entry's EGP neighbor." - ::= { egpNeighEntry 2 } - - egpNeighAs OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The autonomous system of this EGP peer. Zero - should be specified if the autonomous system - number of the neighbor is not yet known." - ::= { egpNeighEntry 3 } - - egpNeighInMsgs OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of EGP messages received without error - from this EGP peer." - ::= { egpNeighEntry 4 } - - egpNeighInErrs OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of EGP messages received from this EGP - peer that proved to be in error (e.g., bad EGP - checksum)." - ::= { egpNeighEntry 5 } - - egpNeighOutMsgs OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of locally generated EGP messages to - this EGP peer." 
- ::= { egpNeighEntry 6 } - - egpNeighOutErrs OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of locally generated EGP messages not - sent to this EGP peer due to resource limitations - within an EGP entity." - ::= { egpNeighEntry 7 } - - egpNeighInErrMsgs OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of EGP-defined error messages received - from this EGP peer." - ::= { egpNeighEntry 8 } - - egpNeighOutErrMsgs OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of EGP-defined error messages sent to - this EGP peer." - ::= { egpNeighEntry 9 } - - egpNeighStateUps OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of EGP state transitions to the UP - state with this EGP peer." - ::= { egpNeighEntry 10 } - - egpNeighStateDowns OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The number of EGP state transitions from the UP - state to any other state with this EGP peer." - ::= { egpNeighEntry 11 } - - egpNeighIntervalHello OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The interval between EGP Hello command - retransmissions (in hundredths of a second). This - represents the t1 timer as defined in RFC 904." - ::= { egpNeighEntry 12 } - - egpNeighIntervalPoll OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The interval between EGP poll command - retransmissions (in hundredths of a second). This - represents the t3 timer as defined in RFC 904." - ::= { egpNeighEntry 13 } - - egpNeighMode OBJECT-TYPE - SYNTAX INTEGER { active(1), passive(2) } - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The polling mode of this EGP entity, either - passive or active." 
- ::= { egpNeighEntry 14 } - - egpNeighEventTrigger OBJECT-TYPE - SYNTAX INTEGER { start(1), stop(2) } - ACCESS read-write - STATUS mandatory - DESCRIPTION - "A control variable used to trigger operator- - initiated Start and Stop events. When read, this - variable always returns the most recent value that - egpNeighEventTrigger was set to. If it has not - been set since the last initialization of the - network management subsystem on the node, it - returns a value of `stop'. - - When set, this variable causes a Start or Stop - event on the specified neighbor, as specified on - pages 8-10 of RFC 904. Briefly, a Start event - causes an Idle peer to begin neighbor acquisition - and a non-Idle peer to reinitiate neighbor - acquisition. A stop event causes a non-Idle peer - to return to the Idle state until a Start event - occurs, either via egpNeighEventTrigger or - otherwise." - ::= { egpNeighEntry 15 } - - - -- additional EGP objects - - egpAs OBJECT-TYPE - SYNTAX INTEGER - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The autonomous system number of this EGP entity." - ::= { egp 6 } - - -- the Transmission group - - -- Based on the transmission media underlying each interface - -- on a system, the corresponding portion of the Transmission - -- group is mandatory for that system. - - -- When Internet-standard definitions for managing - -- transmission media are defined, the transmission group is - -- used to provide a prefix for the names of those objects. - - -- Typically, such definitions reside in the experimental - -- portion of the MIB until they are "proven", then as a - -- part of the Internet standardization process, the - -- definitions are accordingly elevated and a new object - -- identifier, under the transmission group is defined. 
By - -- convention, the name assigned is: - -- - -- type OBJECT IDENTIFIER ::= { transmission number } - -- - -- where "type" is the symbolic value used for the media in - -- the ifType column of the ifTable object, and "number" is - -- the actual integer value corresponding to the symbol. - - - -- the SNMP group - - -- Implementation of the SNMP group is mandatory for all - -- systems which support an SNMP protocol entity. Some of - -- the objects defined below will be zero-valued in those - -- SNMP implementations that are optimized to support only - -- those functions specific to either a management agent or - -- a management station. In particular, it should be - -- observed that the objects below refer to an SNMP entity, - -- and there may be several SNMP entities residing on a - -- managed node (e.g., if the node is hosting acting as - -- a management station). - - snmpInPkts OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of Messages delivered to the - SNMP entity from the transport service." - ::= { snmp 1 } - - snmpOutPkts OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of SNMP Messages which were - passed from the SNMP protocol entity to the - transport service." - ::= { snmp 2 } - - snmpInBadVersions OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of SNMP Messages which were - delivered to the SNMP protocol entity and were for - an unsupported SNMP version." - ::= { snmp 3 } - - snmpInBadCommunityNames OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of SNMP Messages delivered to - the SNMP protocol entity which used a SNMP - community name not known to said entity." 
- ::= { snmp 4 } - - snmpInBadCommunityUses OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of SNMP Messages delivered to - the SNMP protocol entity which represented an SNMP - operation which was not allowed by the SNMP - community named in the Message." - ::= { snmp 5 } - - snmpInASNParseErrs OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of ASN.1 or BER errors - encountered by the SNMP protocol entity when - decoding received SNMP Messages." - ::= { snmp 6 } - - -- { snmp 7 } is not used - - snmpInTooBigs OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of SNMP PDUs which were - delivered to the SNMP protocol entity and for - which the value of the error-status field is - `tooBig'." - ::= { snmp 8 } - - snmpInNoSuchNames OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of SNMP PDUs which were - delivered to the SNMP protocol entity and for - which the value of the error-status field is - `noSuchName'." - ::= { snmp 9 } - - snmpInBadValues OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of SNMP PDUs which were - delivered to the SNMP protocol entity and for - which the value of the error-status field is - `badValue'." - ::= { snmp 10 } - - snmpInReadOnlys OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number valid SNMP PDUs which were - delivered to the SNMP protocol entity and for - which the value of the error-status field is - `readOnly'. It should be noted that it is a - protocol error to generate an SNMP PDU which - contains the value `readOnly' in the error-status - field, as such this object is provided as a means - of detecting incorrect implementations of the - SNMP." 
- ::= { snmp 11 } - - snmpInGenErrs OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of SNMP PDUs which were - delivered to the SNMP protocol entity and for - which the value of the error-status field is - `genErr'." - ::= { snmp 12 } - - snmpInTotalReqVars OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of MIB objects which have been - retrieved successfully by the SNMP protocol entity - as the result of receiving valid SNMP Get-Request - and Get-Next PDUs." - ::= { snmp 13 } - - snmpInTotalSetVars OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of MIB objects which have been - altered successfully by the SNMP protocol entity - as the result of receiving valid SNMP Set-Request - PDUs." - ::= { snmp 14 } - - snmpInGetRequests OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of SNMP Get-Request PDUs which - have been accepted and processed by the SNMP - protocol entity." - ::= { snmp 15 } - - snmpInGetNexts OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of SNMP Get-Next PDUs which have - been accepted and processed by the SNMP protocol - entity." - ::= { snmp 16 } - - snmpInSetRequests OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of SNMP Set-Request PDUs which - have been accepted and processed by the SNMP - protocol entity." - ::= { snmp 17 } - - snmpInGetResponses OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of SNMP Get-Response PDUs which - have been accepted and processed by the SNMP - protocol entity." 
- ::= { snmp 18 } - - snmpInTraps OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of SNMP Trap PDUs which have - been accepted and processed by the SNMP protocol - entity." - ::= { snmp 19 } - - snmpOutTooBigs OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of SNMP PDUs which were - generated by the SNMP protocol entity and for - which the value of the error-status field is - `tooBig.'" - ::= { snmp 20 } - - snmpOutNoSuchNames OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of SNMP PDUs which were - generated by the SNMP protocol entity and for - which the value of the error-status is - `noSuchName'." - ::= { snmp 21 } - - snmpOutBadValues OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of SNMP PDUs which were - generated by the SNMP protocol entity and for - which the value of the error-status field is - `badValue'." - ::= { snmp 22 } - - -- { snmp 23 } is not used - - snmpOutGenErrs OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of SNMP PDUs which were - generated by the SNMP protocol entity and for - which the value of the error-status field is - `genErr'." - ::= { snmp 24 } - - snmpOutGetRequests OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of SNMP Get-Request PDUs which - have been generated by the SNMP protocol entity." - ::= { snmp 25 } - - snmpOutGetNexts OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of SNMP Get-Next PDUs which have - been generated by the SNMP protocol entity." 
- ::= { snmp 26 } - - snmpOutSetRequests OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of SNMP Set-Request PDUs which - have been generated by the SNMP protocol entity." - ::= { snmp 27 } - - snmpOutGetResponses OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of SNMP Get-Response PDUs which - have been generated by the SNMP protocol entity." - ::= { snmp 28 } - - snmpOutTraps OBJECT-TYPE - SYNTAX Counter - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The total number of SNMP Trap PDUs which have - been generated by the SNMP protocol entity." - ::= { snmp 29 } - - snmpEnableAuthenTraps OBJECT-TYPE - SYNTAX INTEGER { enabled(1), disabled(2) } - ACCESS read-write - STATUS mandatory - DESCRIPTION - "Indicates whether the SNMP agent process is - permitted to generate authentication-failure - traps. The value of this object overrides any - configuration information; as such, it provides a - means whereby all authentication-failure traps may - be disabled. - - Note that it is strongly recommended that this - object be stored in non-volatile memory so that it - remains constant between re-initializations of the - network management system." 
- ::= { snmp 30 } - -END diff --git a/plugins/inputs/snmp/testdata/tabletest/RFC1155 b/plugins/inputs/snmp/testdata/tabletest/RFC1155 new file mode 100644 index 0000000000000..571667e45dc29 --- /dev/null +++ b/plugins/inputs/snmp/testdata/tabletest/RFC1155 @@ -0,0 +1,119 @@ +RFC1155 DEFINITIONS ::= BEGIN + +EXPORTS -- EVERYTHING + internet, directory, mgmt, + experimental, private, enterprises, + OBJECT-TYPE, ObjectName, ObjectSyntax, SimpleSyntax, + ApplicationSyntax, NetworkAddress, IpAddress, + Counter, Gauge, TimeTicks, Opaque; + + -- the path to the root + + internet OBJECT IDENTIFIER ::= { iso org(3) dod(6) 1 } + + directory OBJECT IDENTIFIER ::= { internet 1 } + + mgmt OBJECT IDENTIFIER ::= { internet 2 } + + experimental OBJECT IDENTIFIER ::= { internet 3 } + + private OBJECT IDENTIFIER ::= { internet 4 } + enterprises OBJECT IDENTIFIER ::= { private 1 } + + -- definition of object types + + OBJECT-TYPE MACRO ::= + BEGIN + TYPE NOTATION ::= "SYNTAX" type (TYPE ObjectSyntax) + "ACCESS" Access + "STATUS" Status + VALUE NOTATION ::= value (VALUE ObjectName) + + Access ::= "read-only" + | "read-write" + | "write-only" + | "not-accessible" + Status ::= "mandatory" + | "optional" + | "obsolete" + END + + -- names of objects in the MIB + + ObjectName ::= + OBJECT IDENTIFIER + + -- syntax of objects in the MIB + + ObjectSyntax ::= + CHOICE { + simple + SimpleSyntax, + -- note that simple SEQUENCEs are not directly + -- mentioned here to keep things simple (i.e., + -- prevent mis-use). 
However, application-wide + -- types which are IMPLICITly encoded simple + -- SEQUENCEs may appear in the following CHOICE + + application-wide + ApplicationSyntax + } + + SimpleSyntax ::= + CHOICE { + number + INTEGER, + string + OCTET STRING, + object + OBJECT IDENTIFIER, + empty + NULL + } + + ApplicationSyntax ::= + CHOICE { + address + NetworkAddress, + counter + Counter, + gauge + Gauge, + ticks + TimeTicks, + arbitrary + Opaque + + -- other application-wide types, as they are + -- defined, will be added here + } + + -- application-wide types + + NetworkAddress ::= + CHOICE { + internet + IpAddress + } + + IpAddress ::= + [APPLICATION 0] -- in network-byte order + IMPLICIT OCTET STRING (SIZE (4)) + + Counter ::= + [APPLICATION 1] + IMPLICIT INTEGER (0..4294967295) + + Gauge ::= + [APPLICATION 2] + IMPLICIT INTEGER (0..4294967295) + + TimeTicks ::= + [APPLICATION 3] + IMPLICIT INTEGER (0..4294967295) + + Opaque ::= + [APPLICATION 4] -- arbitrary ASN.1 value, + IMPLICIT OCTET STRING -- "double-wrapped" + + END \ No newline at end of file diff --git a/plugins/inputs/snmp/testdata/tabletest/RFC1213 b/plugins/inputs/snmp/testdata/tabletest/RFC1213 new file mode 100644 index 0000000000000..6819d122bb48c --- /dev/null +++ b/plugins/inputs/snmp/testdata/tabletest/RFC1213 @@ -0,0 +1,2613 @@ +RFC1213-MIB DEFINITIONS ::= BEGIN + +IMPORTS + mgmt, NetworkAddress, IpAddress, Counter, Gauge, + TimeTicks + FROM RFC1155 + OBJECT-TYPE + FROM RFC-1212; + +-- This MIB module uses the extended OBJECT-TYPE macro as +-- defined in [14]; + +-- MIB-II (same prefix as MIB-I) + +mib-2 OBJECT IDENTIFIER ::= { mgmt 1 } + +-- textual conventions + +DisplayString ::= + OCTET STRING +-- This data type is used to model textual information taken +-- from the NVT ASCII character set. By convention, objects +-- with this syntax are declared as having + +-- +-- SIZE (0..255) + +PhysAddress ::= + OCTET STRING +-- This data type is used to model media addresses. 
For many +-- types of media, this will be in a binary representation. +-- For example, an ethernet address would be represented as +-- a string of 6 octets. + +-- groups in MIB-II + +system OBJECT IDENTIFIER ::= { mib-2 1 } + +interfaces OBJECT IDENTIFIER ::= { mib-2 2 } + +at OBJECT IDENTIFIER ::= { mib-2 3 } + +ip OBJECT IDENTIFIER ::= { mib-2 4 } + +icmp OBJECT IDENTIFIER ::= { mib-2 5 } + +tcp OBJECT IDENTIFIER ::= { mib-2 6 } + +udp OBJECT IDENTIFIER ::= { mib-2 7 } + +egp OBJECT IDENTIFIER ::= { mib-2 8 } + +-- historical (some say hysterical) +-- cmot OBJECT IDENTIFIER ::= { mib-2 9 } + +transmission OBJECT IDENTIFIER ::= { mib-2 10 } + +snmp OBJECT IDENTIFIER ::= { mib-2 11 } + +-- the System group + +-- Implementation of the System group is mandatory for all +-- systems. If an agent is not configured to have a value +-- for any of these variables, a string of length 0 is +-- returned. + +sysDescr OBJECT-TYPE + SYNTAX DisplayString (SIZE (0..255)) + ACCESS read-only + STATUS mandatory + DESCRIPTION + "A textual description of the entity. This value + should include the full name and version + identification of the system's hardware type, + software operating-system, and networking + software. It is mandatory that this only contain + printable ASCII characters." + ::= { system 1 } + +sysObjectID OBJECT-TYPE + SYNTAX OBJECT IDENTIFIER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The vendor's authoritative identification of the + network management subsystem contained in the + entity. This value is allocated within the SMI + enterprises subtree (1.3.6.1.4.1) and provides an + easy and unambiguous means for determining `what + kind of box' is being managed. For example, if + vendor `Flintstones, Inc.' was assigned the + subtree 1.3.6.1.4.1.4242, it could assign the + identifier 1.3.6.1.4.1.4242.1.1 to its `Fred + Router'." 
+ ::= { system 2 } + +sysUpTime OBJECT-TYPE + SYNTAX TimeTicks + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The time (in hundredths of a second) since the + network management portion of the system was last + re-initialized." + ::= { system 3 } + +sysContact OBJECT-TYPE + SYNTAX DisplayString (SIZE (0..255)) + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The textual identification of the contact person + for this managed node, together with information + on how to contact this person." + ::= { system 4 } + +sysName OBJECT-TYPE + SYNTAX DisplayString (SIZE (0..255)) + ACCESS read-write + STATUS mandatory + DESCRIPTION + "An administratively-assigned name for this + managed node. By convention, this is the node's + fully-qualified domain name." + ::= { system 5 } + +sysLocation OBJECT-TYPE + SYNTAX DisplayString (SIZE (0..255)) + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The physical location of this node (e.g., + `telephone closet, 3rd floor')." + ::= { system 6 } + +sysServices OBJECT-TYPE + SYNTAX INTEGER (0..127) + ACCESS read-only + STATUS mandatory + DESCRIPTION + "A value which indicates the set of services that + this entity primarily offers. + + The value is a sum. This sum initially takes the + value zero, Then, for each layer, L, in the range + 1 through 7, that this node performs transactions + for, 2 raised to (L - 1) is added to the sum. For + example, a node which performs primarily routing + functions would have a value of 4 (2^(3-1)). In + contrast, a node which is a host offering + application services would have a value of 72 + (2^(4-1) + 2^(7-1)). 
Note that in the context of + the Internet suite of protocols, values should be + calculated accordingly: + + layer functionality + 1 physical (e.g., repeaters) + 2 datalink/subnetwork (e.g., bridges) + 3 internet (e.g., IP gateways) + 4 end-to-end (e.g., IP hosts) + 7 applications (e.g., mail relays) + + For systems including OSI protocols, layers 5 and + 6 may also be counted." + ::= { system 7 } + +-- the Interfaces group + +-- Implementation of the Interfaces group is mandatory for +-- all systems. + +ifNumber OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of network interfaces (regardless of + their current state) present on this system." + ::= { interfaces 1 } + +-- the Interfaces table + +-- The Interfaces table contains information on the entity's +-- interfaces. Each interface is thought of as being +-- attached to a `subnetwork'. Note that this term should +-- not be confused with `subnet' which refers to an +-- addressing partitioning scheme used in the Internet suite +-- of protocols. + +ifTable OBJECT-TYPE + SYNTAX SEQUENCE OF IfEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "A list of interface entries. The number of + entries is given by the value of ifNumber." + ::= { interfaces 2 } + +ifEntry OBJECT-TYPE + SYNTAX IfEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "An interface entry containing objects at the + subnetwork layer and below for a particular + interface." 
+ INDEX { ifIndex } + ::= { ifTable 1 } + +IfEntry ::= + SEQUENCE { + ifIndex + INTEGER, + ifDescr + DisplayString, + ifType + INTEGER, + ifMtu + INTEGER, + ifSpeed + Gauge, + ifPhysAddress + PhysAddress, + ifAdminStatus + INTEGER, + ifOperStatus + INTEGER, + ifLastChange + TimeTicks, + ifInOctets + Counter, + ifInUcastPkts + Counter, + ifInNUcastPkts + Counter, + ifInDiscards + Counter, + ifInErrors + Counter, + ifInUnknownProtos + Counter, + ifOutOctets + Counter, + ifOutUcastPkts + Counter, + ifOutNUcastPkts + Counter, + ifOutDiscards + Counter, + ifOutErrors + Counter, + ifOutQLen + Gauge, + ifSpecific + OBJECT IDENTIFIER + } + +ifIndex OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "A unique value for each interface. Its value + ranges between 1 and the value of ifNumber. The + value for each interface must remain constant at + least from one re-initialization of the entity's + network management system to the next re- + initialization." + ::= { ifEntry 1 } + +ifDescr OBJECT-TYPE + SYNTAX DisplayString (SIZE (0..255)) + ACCESS read-only + STATUS mandatory + DESCRIPTION + "A textual string containing information about the + interface. This string should include the name of + the manufacturer, the product name and the version + of the hardware interface." + ::= { ifEntry 2 } + +ifType OBJECT-TYPE + SYNTAX INTEGER { + other(1), -- none of the following + regular1822(2), + hdh1822(3), + ddn-x25(4), + rfc877-x25(5), + ethernet-csmacd(6), + iso88023-csmacd(7), + iso88024-tokenBus(8), + iso88025-tokenRing(9), + iso88026-man(10), + starLan(11), + proteon-10Mbit(12), + proteon-80Mbit(13), + hyperchannel(14), + fddi(15), + lapb(16), + sdlc(17), + ds1(18), -- T-1 + e1(19), -- european equiv. 
of T-1 + basicISDN(20), + primaryISDN(21), -- proprietary serial + propPointToPointSerial(22), + ppp(23), + softwareLoopback(24), + eon(25), -- CLNP over IP [11] + ethernet-3Mbit(26), + nsip(27), -- XNS over IP + slip(28), -- generic SLIP + ultra(29), -- ULTRA technologies + ds3(30), -- T-3 + sip(31), -- SMDS + frame-relay(32) + } + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The type of interface, distinguished according to + the physical/link protocol(s) immediately `below' + the network layer in the protocol stack." + ::= { ifEntry 3 } + +ifMtu OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The size of the largest datagram which can be + sent/received on the interface, specified in + octets. For interfaces that are used for + transmitting network datagrams, this is the size + of the largest network datagram that can be sent + on the interface." + ::= { ifEntry 4 } + +ifSpeed OBJECT-TYPE + SYNTAX Gauge + ACCESS read-only + STATUS mandatory + DESCRIPTION + "An estimate of the interface's current bandwidth + in bits per second. For interfaces which do not + vary in bandwidth or for those where no accurate + estimation can be made, this object should contain + the nominal bandwidth." + ::= { ifEntry 5 } + +ifPhysAddress OBJECT-TYPE + SYNTAX PhysAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The interface's address at the protocol layer + immediately `below' the network layer in the + protocol stack. For interfaces which do not have + + such an address (e.g., a serial line), this object + should contain an octet string of zero length." + ::= { ifEntry 6 } + +ifAdminStatus OBJECT-TYPE + SYNTAX INTEGER { + up(1), -- ready to pass packets + down(2), + testing(3) -- in some test mode + } + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The desired state of the interface. The + testing(3) state indicates that no operational + packets can be passed." 
+ ::= { ifEntry 7 } + +ifOperStatus OBJECT-TYPE + SYNTAX INTEGER { + up(1), -- ready to pass packets + down(2), + testing(3) -- in some test mode + } + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The current operational state of the interface. + The testing(3) state indicates that no operational + packets can be passed." + ::= { ifEntry 8 } + +ifLastChange OBJECT-TYPE + SYNTAX TimeTicks + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The value of sysUpTime at the time the interface + entered its current operational state. If the + current state was entered prior to the last re- + initialization of the local network management + subsystem, then this object contains a zero + value." + ::= { ifEntry 9 } + +ifInOctets OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of octets received on the + interface, including framing characters." + ::= { ifEntry 10 } + +ifInUcastPkts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of subnetwork-unicast packets + delivered to a higher-layer protocol." + ::= { ifEntry 11 } + +ifInNUcastPkts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of non-unicast (i.e., subnetwork- + broadcast or subnetwork-multicast) packets + delivered to a higher-layer protocol." + ::= { ifEntry 12 } + +ifInDiscards OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of inbound packets which were chosen + to be discarded even though no errors had been + detected to prevent their being deliverable to a + higher-layer protocol. One possible reason for + discarding such a packet could be to free up + buffer space." + ::= { ifEntry 13 } + +ifInErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of inbound packets that contained + errors preventing them from being deliverable to a + higher-layer protocol." 
+ ::= { ifEntry 14 } + +ifInUnknownProtos OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of packets received via the interface + which were discarded because of an unknown or + unsupported protocol." + ::= { ifEntry 15 } + +ifOutOctets OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of octets transmitted out of the + interface, including framing characters." + ::= { ifEntry 16 } + +ifOutUcastPkts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of packets that higher-level + protocols requested be transmitted to a + subnetwork-unicast address, including those that + were discarded or not sent." + ::= { ifEntry 17 } + +ifOutNUcastPkts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of packets that higher-level + protocols requested be transmitted to a non- + unicast (i.e., a subnetwork-broadcast or + subnetwork-multicast) address, including those + that were discarded or not sent." + ::= { ifEntry 18 } + +ifOutDiscards OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of outbound packets which were chosen + + to be discarded even though no errors had been + detected to prevent their being transmitted. One + possible reason for discarding such a packet could + be to free up buffer space." + ::= { ifEntry 19 } + +ifOutErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of outbound packets that could not be + transmitted because of errors." + ::= { ifEntry 20 } + +ifOutQLen OBJECT-TYPE + SYNTAX Gauge + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The length of the output packet queue (in + packets)." 
+ ::= { ifEntry 21 } + +ifSpecific OBJECT-TYPE + SYNTAX OBJECT IDENTIFIER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "A reference to MIB definitions specific to the + particular media being used to realize the + interface. For example, if the interface is + realized by an ethernet, then the value of this + object refers to a document defining objects + specific to ethernet. If this information is not + present, its value should be set to the OBJECT + IDENTIFIER { 0 0 }, which is a syntatically valid + object identifier, and any conformant + implementation of ASN.1 and BER must be able to + generate and recognize this value." + ::= { ifEntry 22 } + +-- the Address Translation group + +-- Implementation of the Address Translation group is +-- mandatory for all systems. Note however that this group +-- is deprecated by MIB-II. That is, it is being included + +-- solely for compatibility with MIB-I nodes, and will most +-- likely be excluded from MIB-III nodes. From MIB-II and +-- onwards, each network protocol group contains its own +-- address translation tables. + +-- The Address Translation group contains one table which is +-- the union across all interfaces of the translation tables +-- for converting a NetworkAddress (e.g., an IP address) into +-- a subnetwork-specific address. For lack of a better term, +-- this document refers to such a subnetwork-specific address +-- as a `physical' address. + +-- Examples of such translation tables are: for broadcast +-- media where ARP is in use, the translation table is +-- equivalent to the ARP cache; or, on an X.25 network where +-- non-algorithmic translation to X.121 addresses is +-- required, the translation table contains the +-- NetworkAddress to X.121 address equivalences. + +atTable OBJECT-TYPE + SYNTAX SEQUENCE OF AtEntry + ACCESS not-accessible + STATUS deprecated + DESCRIPTION + "The Address Translation tables contain the + NetworkAddress to `physical' address equivalences. 
+ Some interfaces do not use translation tables for + determining address equivalences (e.g., DDN-X.25 + has an algorithmic method); if all interfaces are + of this type, then the Address Translation table + is empty, i.e., has zero entries." + ::= { at 1 } + +atEntry OBJECT-TYPE + SYNTAX AtEntry + ACCESS not-accessible + STATUS deprecated + DESCRIPTION + "Each entry contains one NetworkAddress to + `physical' address equivalence." + INDEX { atIfIndex, + atNetAddress } + ::= { atTable 1 } + +AtEntry ::= + SEQUENCE { + atIfIndex + INTEGER, + atPhysAddress + PhysAddress, + atNetAddress + NetworkAddress + } + +atIfIndex OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS deprecated + DESCRIPTION + "The interface on which this entry's equivalence + is effective. The interface identified by a + particular value of this index is the same + interface as identified by the same value of + ifIndex." + ::= { atEntry 1 } + +atPhysAddress OBJECT-TYPE + SYNTAX PhysAddress + ACCESS read-write + STATUS deprecated + DESCRIPTION + "The media-dependent `physical' address. + + Setting this object to a null string (one of zero + length) has the effect of invaliding the + corresponding entry in the atTable object. That + is, it effectively dissasociates the interface + identified with said entry from the mapping + identified with said entry. It is an + implementation-specific matter as to whether the + agent removes an invalidated entry from the table. + Accordingly, management stations must be prepared + to receive tabular information from agents that + corresponds to entries not currently in use. + Proper interpretation of such entries requires + examination of the relevant atPhysAddress object." + ::= { atEntry 2 } + +atNetAddress OBJECT-TYPE + SYNTAX NetworkAddress + ACCESS read-write + STATUS deprecated + DESCRIPTION + "The NetworkAddress (e.g., the IP address) + corresponding to the media-dependent `physical' + address." 
+ ::= { atEntry 3 } + +-- the IP group + +-- Implementation of the IP group is mandatory for all +-- systems. + +ipForwarding OBJECT-TYPE + SYNTAX INTEGER { + forwarding(1), -- acting as a gateway + not-forwarding(2) -- NOT acting as a gateway + } + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The indication of whether this entity is acting + as an IP gateway in respect to the forwarding of + datagrams received by, but not addressed to, this + entity. IP gateways forward datagrams. IP hosts + do not (except those source-routed via the host). + + Note that for some managed nodes, this object may + take on only a subset of the values possible. + Accordingly, it is appropriate for an agent to + return a `badValue' response if a management + station attempts to change this object to an + inappropriate value." + ::= { ip 1 } + +ipDefaultTTL OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The default value inserted into the Time-To-Live + field of the IP header of datagrams originated at + this entity, whenever a TTL value is not supplied + by the transport layer protocol." + ::= { ip 2 } + +ipInReceives OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of input datagrams received from + interfaces, including those received in error." + ::= { ip 3 } + +ipInHdrErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of input datagrams discarded due to + errors in their IP headers, including bad + checksums, version number mismatch, other format + errors, time-to-live exceeded, errors discovered + in processing their IP options, etc." + ::= { ip 4 } + +ipInAddrErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of input datagrams discarded because + the IP address in their IP header's destination + field was not a valid address to be received at + this entity. 
This count includes invalid + addresses (e.g., 0.0.0.0) and addresses of + unsupported Classes (e.g., Class E). For entities + which are not IP Gateways and therefore do not + forward datagrams, this counter includes datagrams + discarded because the destination address was not + a local address." + ::= { ip 5 } + +ipForwDatagrams OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of input datagrams for which this + entity was not their final IP destination, as a + result of which an attempt was made to find a + route to forward them to that final destination. + In entities which do not act as IP Gateways, this + counter will include only those packets which were + Source-Routed via this entity, and the Source- + Route option processing was successful." + ::= { ip 6 } + +ipInUnknownProtos OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of locally-addressed datagrams + received successfully but discarded because of an + unknown or unsupported protocol." + ::= { ip 7 } + +ipInDiscards OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of input IP datagrams for which no + problems were encountered to prevent their + continued processing, but which were discarded + (e.g., for lack of buffer space). Note that this + counter does not include any datagrams discarded + while awaiting re-assembly." + ::= { ip 8 } + +ipInDelivers OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of input datagrams successfully + delivered to IP user-protocols (including ICMP)." + ::= { ip 9 } + +ipOutRequests OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of IP datagrams which local IP + user-protocols (including ICMP) supplied to IP in + requests for transmission. Note that this counter + does not include any datagrams counted in + ipForwDatagrams." 
+ ::= { ip 10 } + +ipOutDiscards OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of output IP datagrams for which no + + problem was encountered to prevent their + transmission to their destination, but which were + discarded (e.g., for lack of buffer space). Note + that this counter would include datagrams counted + in ipForwDatagrams if any such packets met this + (discretionary) discard criterion." + ::= { ip 11 } + +ipOutNoRoutes OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of IP datagrams discarded because no + route could be found to transmit them to their + destination. Note that this counter includes any + packets counted in ipForwDatagrams which meet this + `no-route' criterion. Note that this includes any + datagarms which a host cannot route because all of + its default gateways are down." + ::= { ip 12 } + +ipReasmTimeout OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The maximum number of seconds which received + fragments are held while they are awaiting + reassembly at this entity." + ::= { ip 13 } + +ipReasmReqds OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of IP fragments received which needed + to be reassembled at this entity." + ::= { ip 14 } + +ipReasmOKs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of IP datagrams successfully re- + assembled." + ::= { ip 15 } + +ipReasmFails OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of failures detected by the IP re- + assembly algorithm (for whatever reason: timed + out, errors, etc). Note that this is not + necessarily a count of discarded IP fragments + since some algorithms (notably the algorithm in + RFC 815) can lose track of the number of fragments + by combining them as they are received." 
+ ::= { ip 16 } + +ipFragOKs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of IP datagrams that have been + successfully fragmented at this entity." + ::= { ip 17 } + +ipFragFails OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of IP datagrams that have been + discarded because they needed to be fragmented at + this entity but could not be, e.g., because their + Don't Fragment flag was set." + ::= { ip 18 } + +ipFragCreates OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of IP datagram fragments that have + been generated as a result of fragmentation at + this entity." + ::= { ip 19 } + +-- the IP address table + +-- The IP address table contains this entity's IP addressing +-- information. + +ipAddrTable OBJECT-TYPE + SYNTAX SEQUENCE OF IpAddrEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "The table of addressing information relevant to + this entity's IP addresses." + ::= { ip 20 } + +ipAddrEntry OBJECT-TYPE + SYNTAX IpAddrEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "The addressing information for one of this + entity's IP addresses." + INDEX { ipAdEntAddr } + ::= { ipAddrTable 1 } + +IpAddrEntry ::= + SEQUENCE { + ipAdEntAddr + IpAddress, + ipAdEntIfIndex + INTEGER, + ipAdEntNetMask + IpAddress, + ipAdEntBcastAddr + INTEGER, + ipAdEntReasmMaxSize + INTEGER (0..65535) + } + +ipAdEntAddr OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The IP address to which this entry's addressing + information pertains." + ::= { ipAddrEntry 1 } + +ipAdEntIfIndex OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The index value which uniquely identifies the + interface to which this entry is applicable. 
The + interface identified by a particular value of this + index is the same interface as identified by the + same value of ifIndex." + ::= { ipAddrEntry 2 } + +ipAdEntNetMask OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The subnet mask associated with the IP address of + this entry. The value of the mask is an IP + address with all the network bits set to 1 and all + the hosts bits set to 0." + ::= { ipAddrEntry 3 } + +ipAdEntBcastAddr OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The value of the least-significant bit in the IP + broadcast address used for sending datagrams on + the (logical) interface associated with the IP + address of this entry. For example, when the + Internet standard all-ones broadcast address is + used, the value will be 1. This value applies to + both the subnet and network broadcasts addresses + used by the entity on this (logical) interface." + ::= { ipAddrEntry 4 } + +ipAdEntReasmMaxSize OBJECT-TYPE + SYNTAX INTEGER (0..65535) + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The size of the largest IP datagram which this + entity can re-assemble from incoming IP fragmented + datagrams received on this interface." + ::= { ipAddrEntry 5 } + +-- the IP routing table + +-- The IP routing table contains an entry for each route +-- presently known to this entity. + +ipRouteTable OBJECT-TYPE + SYNTAX SEQUENCE OF IpRouteEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "This entity's IP Routing table." + ::= { ip 21 } + +ipRouteEntry OBJECT-TYPE + SYNTAX IpRouteEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "A route to a particular destination." 
+ INDEX { ipRouteDest } + ::= { ipRouteTable 1 } + +IpRouteEntry ::= + SEQUENCE { + ipRouteDest + IpAddress, + ipRouteIfIndex + INTEGER, + ipRouteMetric1 + INTEGER, + ipRouteMetric2 + INTEGER, + ipRouteMetric3 + INTEGER, + ipRouteMetric4 + INTEGER, + ipRouteNextHop + IpAddress, + ipRouteType + INTEGER, + ipRouteProto + INTEGER, + ipRouteAge + INTEGER, + ipRouteMask + IpAddress, + ipRouteMetric5 + INTEGER, + ipRouteInfo + OBJECT IDENTIFIER + } + +ipRouteDest OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The destination IP address of this route. An + entry with a value of 0.0.0.0 is considered a + default route. Multiple routes to a single + destination can appear in the table, but access to + such multiple entries is dependent on the table- + access mechanisms defined by the network + management protocol in use." + ::= { ipRouteEntry 1 } + +ipRouteIfIndex OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The index value which uniquely identifies the + local interface through which the next hop of this + route should be reached. The interface identified + by a particular value of this index is the same + interface as identified by the same value of + ifIndex." + ::= { ipRouteEntry 2 } + +ipRouteMetric1 OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The primary routing metric for this route. The + semantics of this metric are determined by the + routing-protocol specified in the route's + ipRouteProto value. If this metric is not used, + its value should be set to -1." + ::= { ipRouteEntry 3 } + +ipRouteMetric2 OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "An alternate routing metric for this route. The + semantics of this metric are determined by the + routing-protocol specified in the route's + ipRouteProto value. If this metric is not used, + its value should be set to -1." 
+ ::= { ipRouteEntry 4 } + +ipRouteMetric3 OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "An alternate routing metric for this route. The + semantics of this metric are determined by the + routing-protocol specified in the route's + ipRouteProto value. If this metric is not used, + its value should be set to -1." + ::= { ipRouteEntry 5 } + +ipRouteMetric4 OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "An alternate routing metric for this route. The + semantics of this metric are determined by the + routing-protocol specified in the route's + ipRouteProto value. If this metric is not used, + its value should be set to -1." + ::= { ipRouteEntry 6 } + +ipRouteNextHop OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The IP address of the next hop of this route. + (In the case of a route bound to an interface + which is realized via a broadcast media, the value + of this field is the agent's IP address on that + interface.)" + ::= { ipRouteEntry 7 } + +ipRouteType OBJECT-TYPE + SYNTAX INTEGER { + other(1), -- none of the following + + invalid(2), -- an invalidated route + + -- route to directly + direct(3), -- connected (sub-)network + + -- route to a non-local + indirect(4) -- host/network/sub-network + } + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The type of route. Note that the values + direct(3) and indirect(4) refer to the notion of + direct and indirect routing in the IP + architecture. + + Setting this object to the value invalid(2) has + the effect of invalidating the corresponding entry + in the ipRouteTable object. That is, it + effectively dissasociates the destination + identified with said entry from the route + identified with said entry. It is an + implementation-specific matter as to whether the + agent removes an invalidated entry from the table. 
+ Accordingly, management stations must be prepared + to receive tabular information from agents that + corresponds to entries not currently in use. + Proper interpretation of such entries requires + examination of the relevant ipRouteType object." + ::= { ipRouteEntry 8 } + +ipRouteProto OBJECT-TYPE + SYNTAX INTEGER { + other(1), -- none of the following + + -- non-protocol information, + -- e.g., manually configured + local(2), -- entries + + -- set via a network + netmgmt(3), -- management protocol + + -- obtained via ICMP, + icmp(4), -- e.g., Redirect + + -- the remaining values are + -- all gateway routing + -- protocols + egp(5), + ggp(6), + hello(7), + rip(8), + is-is(9), + es-is(10), + ciscoIgrp(11), + bbnSpfIgp(12), + ospf(13), + bgp(14) + } + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The routing mechanism via which this route was + learned. Inclusion of values for gateway routing + protocols is not intended to imply that hosts + should support those protocols." + ::= { ipRouteEntry 9 } + +ipRouteAge OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The number of seconds since this route was last + updated or otherwise determined to be correct. + Note that no semantics of `too old' can be implied + except through knowledge of the routing protocol + by which the route was learned." + ::= { ipRouteEntry 10 } + +ipRouteMask OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-write + STATUS mandatory + DESCRIPTION + "Indicate the mask to be logical-ANDed with the + destination address before being compared to the + value in the ipRouteDest field. 
For those systems + that do not support arbitrary subnet masks, an + agent constructs the value of the ipRouteMask by + determining whether the value of the correspondent + ipRouteDest field belong to a class-A, B, or C + network, and then using one of: + + mask network + 255.0.0.0 class-A + 255.255.0.0 class-B + 255.255.255.0 class-C + + If the value of the ipRouteDest is 0.0.0.0 (a + default route), then the mask value is also + 0.0.0.0. It should be noted that all IP routing + subsystems implicitly use this mechanism." + ::= { ipRouteEntry 11 } + +ipRouteMetric5 OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "An alternate routing metric for this route. The + semantics of this metric are determined by the + routing-protocol specified in the route's + ipRouteProto value. If this metric is not used, + its value should be set to -1." + ::= { ipRouteEntry 12 } + +ipRouteInfo OBJECT-TYPE + SYNTAX OBJECT IDENTIFIER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "A reference to MIB definitions specific to the + particular routing protocol which is responsible + for this route, as determined by the value + specified in the route's ipRouteProto value. If + this information is not present, its value should + be set to the OBJECT IDENTIFIER { 0 0 }, which is + a syntatically valid object identifier, and any + conformant implementation of ASN.1 and BER must be + able to generate and recognize this value." + ::= { ipRouteEntry 13 } + +-- the IP Address Translation table + +-- The IP address translation table contain the IpAddress to +-- `physical' address equivalences. Some interfaces do not +-- use translation tables for determining address +-- equivalences (e.g., DDN-X.25 has an algorithmic method); +-- if all interfaces are of this type, then the Address +-- Translation table is empty, i.e., has zero entries. 
+ +ipNetToMediaTable OBJECT-TYPE + SYNTAX SEQUENCE OF IpNetToMediaEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "The IP Address Translation table used for mapping + from IP addresses to physical addresses." + ::= { ip 22 } + +ipNetToMediaEntry OBJECT-TYPE + SYNTAX IpNetToMediaEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "Each entry contains one IpAddress to `physical' + address equivalence." + INDEX { ipNetToMediaIfIndex, + ipNetToMediaNetAddress } + ::= { ipNetToMediaTable 1 } + +IpNetToMediaEntry ::= + SEQUENCE { + ipNetToMediaIfIndex + INTEGER, + ipNetToMediaPhysAddress + PhysAddress, + ipNetToMediaNetAddress + IpAddress, + ipNetToMediaType + INTEGER + } + +ipNetToMediaIfIndex OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The interface on which this entry's equivalence + is effective. The interface identified by a + particular value of this index is the same + interface as identified by the same value of + ifIndex." + ::= { ipNetToMediaEntry 1 } + +ipNetToMediaPhysAddress OBJECT-TYPE + SYNTAX PhysAddress + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The media-dependent `physical' address." + ::= { ipNetToMediaEntry 2 } + +ipNetToMediaNetAddress OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The IpAddress corresponding to the media- + dependent `physical' address." + ::= { ipNetToMediaEntry 3 } + +ipNetToMediaType OBJECT-TYPE + SYNTAX INTEGER { + other(1), -- none of the following + invalid(2), -- an invalidated mapping + dynamic(3), + static(4) + } + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The type of mapping. + + Setting this object to the value invalid(2) has + the effect of invalidating the corresponding entry + in the ipNetToMediaTable. That is, it effectively + dissasociates the interface identified with said + entry from the mapping identified with said entry. 
+ It is an implementation-specific matter as to + whether the agent removes an invalidated entry + from the table. Accordingly, management stations + must be prepared to receive tabular information + from agents that corresponds to entries not + currently in use. Proper interpretation of such + entries requires examination of the relevant + ipNetToMediaType object." + ::= { ipNetToMediaEntry 4 } + +-- additional IP objects + +ipRoutingDiscards OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of routing entries which were chosen + to be discarded even though they are valid. One + possible reason for discarding such an entry could + be to free-up buffer space for other routing + + entries." + ::= { ip 23 } + +-- the ICMP group + +-- Implementation of the ICMP group is mandatory for all +-- systems. + +icmpInMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of ICMP messages which the + entity received. Note that this counter includes + all those counted by icmpInErrors." + ::= { icmp 1 } + +icmpInErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP messages which the entity + received but determined as having ICMP-specific + errors (bad ICMP checksums, bad length, etc.)." + ::= { icmp 2 } + +icmpInDestUnreachs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Destination Unreachable + messages received." + ::= { icmp 3 } + +icmpInTimeExcds OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Time Exceeded messages + received." + ::= { icmp 4 } + +icmpInParmProbs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Parameter Problem messages + received." 
+ ::= { icmp 5 } + +icmpInSrcQuenchs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Source Quench messages + received." + ::= { icmp 6 } + +icmpInRedirects OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Redirect messages received." + ::= { icmp 7 } + +icmpInEchos OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Echo (request) messages + received." + ::= { icmp 8 } + +icmpInEchoReps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Echo Reply messages received." + ::= { icmp 9 } + +icmpInTimestamps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Timestamp (request) messages + received." + ::= { icmp 10 } + +icmpInTimestampReps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Timestamp Reply messages + received." + ::= { icmp 11 } + +icmpInAddrMasks OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Address Mask Request messages + received." + ::= { icmp 12 } + +icmpInAddrMaskReps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Address Mask Reply messages + received." + ::= { icmp 13 } + +icmpOutMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of ICMP messages which this + entity attempted to send. Note that this counter + includes all those counted by icmpOutErrors." + ::= { icmp 14 } + +icmpOutErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP messages which this entity did + not send due to problems discovered within ICMP + + such as a lack of buffers. 
This value should not + include errors discovered outside the ICMP layer + such as the inability of IP to route the resultant + datagram. In some implementations there may be no + types of error which contribute to this counter's + value." + ::= { icmp 15 } + +icmpOutDestUnreachs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Destination Unreachable + messages sent." + ::= { icmp 16 } + +icmpOutTimeExcds OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Time Exceeded messages sent." + ::= { icmp 17 } + +icmpOutParmProbs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Parameter Problem messages + sent." + ::= { icmp 18 } + +icmpOutSrcQuenchs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Source Quench messages sent." + ::= { icmp 19 } + +icmpOutRedirects OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Redirect messages sent. For a + + host, this object will always be zero, since hosts + do not send redirects." + ::= { icmp 20 } + +icmpOutEchos OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Echo (request) messages sent." + ::= { icmp 21 } + +icmpOutEchoReps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Echo Reply messages sent." + ::= { icmp 22 } + +icmpOutTimestamps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Timestamp (request) messages + sent." + ::= { icmp 23 } + +icmpOutTimestampReps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Timestamp Reply messages + sent." 
+ ::= { icmp 24 } + +icmpOutAddrMasks OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Address Mask Request messages + sent." + ::= { icmp 25 } + +icmpOutAddrMaskReps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Address Mask Reply messages + sent." + ::= { icmp 26 } + +-- the TCP group + +-- Implementation of the TCP group is mandatory for all +-- systems that implement the TCP. + +-- Note that instances of object types that represent +-- information about a particular TCP connection are +-- transient; they persist only as long as the connection +-- in question. + +tcpRtoAlgorithm OBJECT-TYPE + SYNTAX INTEGER { + other(1), -- none of the following + + constant(2), -- a constant rto + rsre(3), -- MIL-STD-1778, Appendix B + vanj(4) -- Van Jacobson's algorithm [10] + } + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The algorithm used to determine the timeout value + used for retransmitting unacknowledged octets." + ::= { tcp 1 } + +tcpRtoMin OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The minimum value permitted by a TCP + implementation for the retransmission timeout, + measured in milliseconds. More refined semantics + for objects of this type depend upon the algorithm + used to determine the retransmission timeout. In + particular, when the timeout algorithm is rsre(3), + an object of this type has the semantics of the + LBOUND quantity described in RFC 793." + ::= { tcp 2 } + +tcpRtoMax OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The maximum value permitted by a TCP + implementation for the retransmission timeout, + measured in milliseconds. More refined semantics + for objects of this type depend upon the algorithm + used to determine the retransmission timeout. 
In + particular, when the timeout algorithm is rsre(3), + an object of this type has the semantics of the + UBOUND quantity described in RFC 793." + ::= { tcp 3 } + +tcpMaxConn OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The limit on the total number of TCP connections + the entity can support. In entities where the + maximum number of connections is dynamic, this + object should contain the value -1." + ::= { tcp 4 } + +tcpActiveOpens OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of times TCP connections have made a + direct transition to the SYN-SENT state from the + CLOSED state." + ::= { tcp 5 } + +tcpPassiveOpens OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of times TCP connections have made a + direct transition to the SYN-RCVD state from the + LISTEN state." + ::= { tcp 6 } + +tcpAttemptFails OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of times TCP connections have made a + direct transition to the CLOSED state from either + the SYN-SENT state or the SYN-RCVD state, plus the + number of times TCP connections have made a direct + transition to the LISTEN state from the SYN-RCVD + state." + ::= { tcp 7 } + +tcpEstabResets OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of times TCP connections have made a + direct transition to the CLOSED state from either + the ESTABLISHED state or the CLOSE-WAIT state." + ::= { tcp 8 } + +tcpCurrEstab OBJECT-TYPE + SYNTAX Gauge + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of TCP connections for which the + current state is either ESTABLISHED or CLOSE- + WAIT." + ::= { tcp 9 } + +tcpInSegs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of segments received, including + those received in error. 
This count includes + segments received on currently established + connections." + ::= { tcp 10 } + +tcpOutSegs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of segments sent, including + those on current connections but excluding those + containing only retransmitted octets." + ::= { tcp 11 } + +tcpRetransSegs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of segments retransmitted - that + is, the number of TCP segments transmitted + containing one or more previously transmitted + octets." + ::= { tcp 12 } + +-- the TCP Connection table + +-- The TCP connection table contains information about this +-- entity's existing TCP connections. + +tcpConnTable OBJECT-TYPE + SYNTAX SEQUENCE OF TcpConnEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "A table containing TCP connection-specific + information." + ::= { tcp 13 } + +tcpConnEntry OBJECT-TYPE + SYNTAX TcpConnEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "Information about a particular current TCP + connection. An object of this type is transient, + in that it ceases to exist when (or soon after) + the connection makes the transition to the CLOSED + state." + INDEX { tcpConnLocalAddress, + tcpConnLocalPort, + tcpConnRemAddress, + tcpConnRemPort } + ::= { tcpConnTable 1 } + +TcpConnEntry ::= + SEQUENCE { + tcpConnState + INTEGER, + tcpConnLocalAddress + IpAddress, + tcpConnLocalPort + INTEGER (0..65535), + tcpConnRemAddress + IpAddress, + tcpConnRemPort + INTEGER (0..65535) + } + +tcpConnState OBJECT-TYPE + SYNTAX INTEGER { + closed(1), + listen(2), + synSent(3), + synReceived(4), + established(5), + finWait1(6), + finWait2(7), + closeWait(8), + lastAck(9), + closing(10), + timeWait(11), + deleteTCB(12) + } + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The state of this TCP connection. + + The only value which may be set by a management + station is deleteTCB(12). 
Accordingly, it is + appropriate for an agent to return a `badValue' + response if a management station attempts to set + this object to any other value. + + If a management station sets this object to the + value deleteTCB(12), then this has the effect of + deleting the TCB (as defined in RFC 793) of the + corresponding connection on the managed node, + resulting in immediate termination of the + connection. + + As an implementation-specific option, a RST + + segment may be sent from the managed node to the + other TCP endpoint (note however that RST segments + are not sent reliably)." + ::= { tcpConnEntry 1 } + +tcpConnLocalAddress OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The local IP address for this TCP connection. In + the case of a connection in the listen state which + is willing to accept connections for any IP + interface associated with the node, the value + 0.0.0.0 is used." + ::= { tcpConnEntry 2 } + +tcpConnLocalPort OBJECT-TYPE + SYNTAX INTEGER (0..65535) + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The local port number for this TCP connection." + ::= { tcpConnEntry 3 } + +tcpConnRemAddress OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The remote IP address for this TCP connection." + ::= { tcpConnEntry 4 } + +tcpConnRemPort OBJECT-TYPE + SYNTAX INTEGER (0..65535) + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The remote port number for this TCP connection." + ::= { tcpConnEntry 5 } + +-- additional TCP objects + +tcpInErrs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of segments received in error + (e.g., bad TCP checksums)." + ::= { tcp 14 } + +tcpOutRsts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of TCP segments sent containing the + RST flag." 
+ ::= { tcp 15 } + +-- the UDP group + +-- Implementation of the UDP group is mandatory for all +-- systems which implement the UDP. + +udpInDatagrams OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of UDP datagrams delivered to + UDP users." + ::= { udp 1 } + +udpNoPorts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of received UDP datagrams for + which there was no application at the destination + port." + ::= { udp 2 } + +udpInErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of received UDP datagrams that could + not be delivered for reasons other than the lack + of an application at the destination port." + ::= { udp 3 } + +udpOutDatagrams OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of UDP datagrams sent from this + entity." + ::= { udp 4 } + +-- the UDP Listener table + +-- The UDP listener table contains information about this +-- entity's UDP end-points on which a local application is +-- currently accepting datagrams. + +udpTable OBJECT-TYPE + SYNTAX SEQUENCE OF UdpEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "A table containing UDP listener information." + ::= { udp 5 } + +udpEntry OBJECT-TYPE + SYNTAX UdpEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "Information about a particular current UDP + listener." + INDEX { udpLocalAddress, udpLocalPort } + ::= { udpTable 1 } + +UdpEntry ::= + SEQUENCE { + udpLocalAddress + IpAddress, + udpLocalPort + INTEGER (0..65535) + } + +udpLocalAddress OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The local IP address for this UDP listener. In + + the case of a UDP listener which is willing to + accept datagrams for any IP interface associated + with the node, the value 0.0.0.0 is used." 
+ ::= { udpEntry 1 } + +udpLocalPort OBJECT-TYPE + SYNTAX INTEGER (0..65535) + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The local port number for this UDP listener." + ::= { udpEntry 2 } + +-- the EGP group + +-- Implementation of the EGP group is mandatory for all +-- systems which implement the EGP. + +egpInMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP messages received without + error." + ::= { egp 1 } + +egpInErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP messages received that proved + to be in error." + ::= { egp 2 } + +egpOutMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of locally generated EGP + messages." + ::= { egp 3 } + +egpOutErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of locally generated EGP messages not + sent due to resource limitations within an EGP + entity." + ::= { egp 4 } + +-- the EGP Neighbor table + +-- The EGP neighbor table contains information about this +-- entity's EGP neighbors. + +egpNeighTable OBJECT-TYPE + SYNTAX SEQUENCE OF EgpNeighEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "The EGP neighbor table." + ::= { egp 5 } + +egpNeighEntry OBJECT-TYPE + SYNTAX EgpNeighEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "Information about this entity's relationship with + a particular EGP neighbor." 
+ INDEX { egpNeighAddr } + ::= { egpNeighTable 1 } + +EgpNeighEntry ::= + SEQUENCE { + egpNeighState + INTEGER, + egpNeighAddr + IpAddress, + egpNeighAs + INTEGER, + egpNeighInMsgs + Counter, + egpNeighInErrs + Counter, + egpNeighOutMsgs + Counter, + egpNeighOutErrs + Counter, + egpNeighInErrMsgs + Counter, + egpNeighOutErrMsgs + Counter, + egpNeighStateUps + Counter, + egpNeighStateDowns + Counter, + egpNeighIntervalHello + INTEGER, + egpNeighIntervalPoll + INTEGER, + egpNeighMode + INTEGER, + egpNeighEventTrigger + INTEGER + } + +egpNeighState OBJECT-TYPE + SYNTAX INTEGER { + idle(1), + acquisition(2), + down(3), + up(4), + cease(5) + } + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The EGP state of the local system with respect to + this entry's EGP neighbor. Each EGP state is + represented by a value that is one greater than + the numerical value associated with said state in + RFC 904." + ::= { egpNeighEntry 1 } + +egpNeighAddr OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The IP address of this entry's EGP neighbor." + ::= { egpNeighEntry 2 } + +egpNeighAs OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The autonomous system of this EGP peer. Zero + should be specified if the autonomous system + number of the neighbor is not yet known." + ::= { egpNeighEntry 3 } + +egpNeighInMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP messages received without error + from this EGP peer." + ::= { egpNeighEntry 4 } + +egpNeighInErrs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP messages received from this EGP + peer that proved to be in error (e.g., bad EGP + checksum)." + ::= { egpNeighEntry 5 } + +egpNeighOutMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of locally generated EGP messages to + this EGP peer." 
+ ::= { egpNeighEntry 6 } + +egpNeighOutErrs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of locally generated EGP messages not + sent to this EGP peer due to resource limitations + within an EGP entity." + ::= { egpNeighEntry 7 } + +egpNeighInErrMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP-defined error messages received + from this EGP peer." + ::= { egpNeighEntry 8 } + +egpNeighOutErrMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP-defined error messages sent to + this EGP peer." + ::= { egpNeighEntry 9 } + +egpNeighStateUps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP state transitions to the UP + state with this EGP peer." + ::= { egpNeighEntry 10 } + +egpNeighStateDowns OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP state transitions from the UP + state to any other state with this EGP peer." + ::= { egpNeighEntry 11 } + +egpNeighIntervalHello OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The interval between EGP Hello command + retransmissions (in hundredths of a second). This + represents the t1 timer as defined in RFC 904." + ::= { egpNeighEntry 12 } + +egpNeighIntervalPoll OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The interval between EGP poll command + + retransmissions (in hundredths of a second). This + represents the t3 timer as defined in RFC 904." + ::= { egpNeighEntry 13 } + +egpNeighMode OBJECT-TYPE + SYNTAX INTEGER { active(1), passive(2) } + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The polling mode of this EGP entity, either + passive or active." 
+ ::= { egpNeighEntry 14 } + +egpNeighEventTrigger OBJECT-TYPE + SYNTAX INTEGER { start(1), stop(2) } + ACCESS read-write + STATUS mandatory + DESCRIPTION + "A control variable used to trigger operator- + initiated Start and Stop events. When read, this + variable always returns the most recent value that + egpNeighEventTrigger was set to. If it has not + been set since the last initialization of the + network management subsystem on the node, it + returns a value of `stop'. + + When set, this variable causes a Start or Stop + event on the specified neighbor, as specified on + pages 8-10 of RFC 904. Briefly, a Start event + causes an Idle peer to begin neighbor acquisition + and a non-Idle peer to reinitiate neighbor + acquisition. A stop event causes a non-Idle peer + to return to the Idle state until a Start event + occurs, either via egpNeighEventTrigger or + otherwise." + ::= { egpNeighEntry 15 } + +-- additional EGP objects + +egpAs OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The autonomous system number of this EGP entity." + ::= { egp 6 } + +-- the Transmission group + +-- Based on the transmission media underlying each interface +-- on a system, the corresponding portion of the Transmission +-- group is mandatory for that system. + +-- When Internet-standard definitions for managing +-- transmission media are defined, the transmission group is +-- used to provide a prefix for the names of those objects. + +-- Typically, such definitions reside in the experimental +-- portion of the MIB until they are "proven", then as a +-- part of the Internet standardization process, the +-- definitions are accordingly elevated and a new object +-- identifier, under the transmission group is defined. 
By +-- convention, the name assigned is: +-- +-- type OBJECT IDENTIFIER ::= { transmission number } +-- +-- where "type" is the symbolic value used for the media in +-- the ifType column of the ifTable object, and "number" is +-- the actual integer value corresponding to the symbol. + +-- the SNMP group + +-- Implementation of the SNMP group is mandatory for all +-- systems which support an SNMP protocol entity. Some of +-- the objects defined below will be zero-valued in those +-- SNMP implementations that are optimized to support only +-- those functions specific to either a management agent or +-- a management station. In particular, it should be +-- observed that the objects below refer to an SNMP entity, +-- and there may be several SNMP entities residing on a +-- managed node (e.g., if the node is hosting acting as +-- a management station). + +snmpInPkts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of Messages delivered to the + SNMP entity from the transport service." + ::= { snmp 1 } + +snmpOutPkts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Messages which were + passed from the SNMP protocol entity to the + transport service." + ::= { snmp 2 } + +snmpInBadVersions OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Messages which were + delivered to the SNMP protocol entity and were for + an unsupported SNMP version." + ::= { snmp 3 } + +snmpInBadCommunityNames OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Messages delivered to + the SNMP protocol entity which used a SNMP + community name not known to said entity." 
+ ::= { snmp 4 } + +snmpInBadCommunityUses OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Messages delivered to + the SNMP protocol entity which represented an SNMP + operation which was not allowed by the SNMP + community named in the Message." + ::= { snmp 5 } + +snmpInASNParseErrs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of ASN.1 or BER errors + encountered by the SNMP protocol entity when + decoding received SNMP Messages." + ::= { snmp 6 } + +-- { snmp 7 } is not used + +snmpInTooBigs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP PDUs which were + delivered to the SNMP protocol entity and for + which the value of the error-status field is + `tooBig'." + ::= { snmp 8 } + +snmpInNoSuchNames OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP PDUs which were + delivered to the SNMP protocol entity and for + which the value of the error-status field is + `noSuchName'." + ::= { snmp 9 } + +snmpInBadValues OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP PDUs which were + delivered to the SNMP protocol entity and for + which the value of the error-status field is + `badValue'." + ::= { snmp 10 } + +snmpInReadOnlys OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number valid SNMP PDUs which were + delivered to the SNMP protocol entity and for + which the value of the error-status field is + `readOnly'. It should be noted that it is a + protocol error to generate an SNMP PDU which + contains the value `readOnly' in the error-status + field, as such this object is provided as a means + of detecting incorrect implementations of the + + SNMP." 
+ ::= { snmp 11 } + +snmpInGenErrs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP PDUs which were + delivered to the SNMP protocol entity and for + which the value of the error-status field is + `genErr'." + ::= { snmp 12 } + +snmpInTotalReqVars OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of MIB objects which have been + retrieved successfully by the SNMP protocol entity + as the result of receiving valid SNMP Get-Request + and Get-Next PDUs." + ::= { snmp 13 } + +snmpInTotalSetVars OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of MIB objects which have been + altered successfully by the SNMP protocol entity + as the result of receiving valid SNMP Set-Request + PDUs." + ::= { snmp 14 } + +snmpInGetRequests OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Get-Request PDUs which + have been accepted and processed by the SNMP + protocol entity." + ::= { snmp 15 } + +snmpInGetNexts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Get-Next PDUs which have + been accepted and processed by the SNMP protocol + entity." + ::= { snmp 16 } + +snmpInSetRequests OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Set-Request PDUs which + have been accepted and processed by the SNMP + protocol entity." + ::= { snmp 17 } + +snmpInGetResponses OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Get-Response PDUs which + have been accepted and processed by the SNMP + protocol entity." 
+ ::= { snmp 18 } + +snmpInTraps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Trap PDUs which have + been accepted and processed by the SNMP protocol + entity." + ::= { snmp 19 } + +snmpOutTooBigs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP PDUs which were + generated by the SNMP protocol entity and for + which the value of the error-status field is + `tooBig.'" + ::= { snmp 20 } + +snmpOutNoSuchNames OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP PDUs which were + generated by the SNMP protocol entity and for + which the value of the error-status is + `noSuchName'." + ::= { snmp 21 } + +snmpOutBadValues OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP PDUs which were + generated by the SNMP protocol entity and for + which the value of the error-status field is + `badValue'." + ::= { snmp 22 } + +-- { snmp 23 } is not used + +snmpOutGenErrs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP PDUs which were + generated by the SNMP protocol entity and for + which the value of the error-status field is + `genErr'." + ::= { snmp 24 } + +snmpOutGetRequests OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Get-Request PDUs which + have been generated by the SNMP protocol entity." + ::= { snmp 25 } + +snmpOutGetNexts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Get-Next PDUs which have + been generated by the SNMP protocol entity." 
+ ::= { snmp 26 } + +snmpOutSetRequests OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Set-Request PDUs which + have been generated by the SNMP protocol entity." + ::= { snmp 27 } + +snmpOutGetResponses OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Get-Response PDUs which + have been generated by the SNMP protocol entity." + ::= { snmp 28 } + +snmpOutTraps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Trap PDUs which have + been generated by the SNMP protocol entity." + ::= { snmp 29 } + +snmpEnableAuthenTraps OBJECT-TYPE + SYNTAX INTEGER { enabled(1), disabled(2) } + ACCESS read-write + STATUS mandatory + DESCRIPTION + "Indicates whether the SNMP agent process is + permitted to generate authentication-failure + traps. The value of this object overrides any + configuration information; as such, it provides a + means whereby all authentication-failure traps may + be disabled. + + Note that it is strongly recommended that this + object be stored in non-volatile memory so that it + remains constant between re-initializations of the + network management system." 
+ ::= { snmp 30 } + +END \ No newline at end of file diff --git a/plugins/inputs/snmp/testdata/test.mib b/plugins/inputs/snmp/testdata/test.mib deleted file mode 100644 index c6e7a2a8962b6..0000000000000 --- a/plugins/inputs/snmp/testdata/test.mib +++ /dev/null @@ -1,97 +0,0 @@ -TEST DEFINITIONS ::= BEGIN - -testOID ::= { 1 0 0 } - -testTable OBJECT-TYPE - SYNTAX SEQUENCE OF testTableEntry - MAX-ACCESS not-accessible - STATUS current - ::= { testOID 0 } - -testTableEntry OBJECT-TYPE - SYNTAX TestTableEntry - MAX-ACCESS not-accessible - STATUS current - INDEX { - server - } - ::= { testTable 1 } - -TestTableEntry ::= - SEQUENCE { - server OCTET STRING, - connections INTEGER, - latency OCTET STRING, - description OCTET STRING, - } - -server OBJECT-TYPE - SYNTAX OCTET STRING - MAX-ACCESS read-only - STATUS current - ::= { testTableEntry 1 } - -connections OBJECT-TYPE - SYNTAX INTEGER - MAX-ACCESS read-only - STATUS current - ::= { testTableEntry 2 } - -latency OBJECT-TYPE - SYNTAX OCTET STRING - MAX-ACCESS read-only - STATUS current - ::= { testTableEntry 3 } - -description OBJECT-TYPE - SYNTAX OCTET STRING - MAX-ACCESS read-only - STATUS current - ::= { testTableEntry 4 } - -hostname OBJECT-TYPE - SYNTAX OCTET STRING - MAX-ACCESS read-only - STATUS current - ::= { testOID 1 1 } - -testSecondaryTable OBJECT-TYPE - SYNTAX SEQUENCE OF testSecondaryTableEntry - MAX-ACCESS not-accessible - STATUS current - ::= { testOID 3 } - -testSecondaryTableEntry OBJECT-TYPE - SYNTAX TestSecondaryTableEntry - MAX-ACCESS not-accessible - STATUS current - INDEX { - instance - } - ::= { testSecondaryTable 1 } - -TestSecondaryTableEntry ::= - SEQUENCE { - instance OCTET STRING, - connections INTEGER, - testTableIndex INTEGER, - } - -instance OBJECT-TYPE - SYNTAX OCTET STRING - MAX-ACCESS read-only - STATUS current - ::= { testSecondaryTableEntry 1 } - -connections OBJECT-TYPE - SYNTAX OCTET STRING - MAX-ACCESS read-only - STATUS current - ::= { testSecondaryTableEntry 2 } - 
-testTableIndex OBJECT-TYPE - SYNTAX OCTET STRING - MAX-ACCESS read-only - STATUS current - ::= { testSecondaryTableEntry 3 } -END From d09955123221deb3ac659da11118e24caa3cc110 Mon Sep 17 00:00:00 2001 From: MyaLongmire Date: Mon, 11 Oct 2021 13:18:02 -0600 Subject: [PATCH 70/81] fix: trying to get mib files working --- plugins/inputs/snmp/snmp_test.go | 92 ++++++++++++++++++- .../testdata/{tabletest/RFC1213 => tableMib} | 4 +- .../{tabletest/RFC1155 => tableMibImports} | 2 +- 3 files changed, 92 insertions(+), 6 deletions(-) rename plugins/inputs/snmp/testdata/{tabletest/RFC1213 => tableMib} (99%) rename plugins/inputs/snmp/testdata/{tabletest/RFC1155 => tableMibImports} (98%) diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index 1e7f4d00867cf..41b2cc430bd56 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -1,8 +1,11 @@ package snmp import ( + "fmt" "net" + "os" "path/filepath" + "strings" "sync" "testing" "time" @@ -13,6 +16,8 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/testutil" "github.com/influxdata/toml" + "github.com/sleepinggenius2/gosmi" + "github.com/sleepinggenius2/gosmi/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -153,13 +158,13 @@ func TestFieldInit(t *testing.T) { } func TestTableInit(t *testing.T) { - testDataPath, err := filepath.Abs("./testdata/tabletest") + testDataPath, err := filepath.Abs("./testdata") require.NoError(t, err) tbl := Table{ Oid: ".1.3.6.1.2.1.3.1", Fields: []Field{ - {Oid: ".1.3.6.1.2.1.3.1.1.1", Name: "atIfIndex"}, + {Oid: ".1.3.6.1.2.1.3.1.1.1", Name: "ifIndex"}, {Oid: "RFC1213-MIB::atPhysAddress", Name: "atPhysAddress", IsTag: true}, }, } @@ -174,7 +179,7 @@ func TestTableInit(t *testing.T) { assert.Equal(t, "atTable", tbl.Name) assert.Len(t, tbl.Fields, 4) - assert.Contains(t, tbl.Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.1", Name: "atIfIndex", initialized: true, snmp: 
s}) + assert.Contains(t, tbl.Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.1", Name: "atIfIndex", initialized: true, IsTag: true, snmp: s}) assert.Contains(t, tbl.Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.2", Name: "atPhysAddress", IsTag: true, initialized: true, snmp: s, Conversion: "hwaddr"}) assert.Contains(t, tbl.Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.3", Name: "atNetAddress", initialized: true, IsTag: true, snmp: s}) } @@ -1158,3 +1163,84 @@ func TestTableJoinNoIndexAsTag_walk(t *testing.T) { assert.Contains(t, tb.Rows, rtr2) assert.Contains(t, tb.Rows, rtr3) } + +func TestGoSmi(t *testing.T) { + gosmi.Init() + Path := []string{"./testdata"} + var folders []string + for _, mibPath := range Path { + gosmi.AppendPath(mibPath) + folders = append(folders, mibPath) + err := filepath.Walk(mibPath, func(path string, info os.FileInfo, err error) error { + if info.Mode()&os.ModeSymlink != 0 { + s, _ := os.Readlink(path) + folders = append(folders, s) + } + return nil + }) + require.NoError(t, err) + for _, folder := range folders { + err := filepath.Walk(folder, func(path string, info os.FileInfo, err error) error { + if info.IsDir() { + gosmi.AppendPath(path) + } else if info.Mode()&os.ModeSymlink == 0 { + gosmi.LoadModule(info.Name()) + //println(load) + } + return nil + }) + require.NoError(t, err) + } + folders = []string{} + } + oid := "RFC1213-MIB::atTable" + + s := strings.Split(oid, "::") + var end string + // node becomes sysUpTime.0 + node := s[1] + if strings.ContainsAny(node, ".") { + s = strings.Split(node, ".") + // node becomes sysUpTime + node = s[0] + end = "." + s[1] + } + + out, err := gosmi.GetNode(node) + require.NoError(t, err) + + oidNum := "." 
+ out.RenderNumeric() + end + + println(oidNum) + + oidText := out.RenderQualified() + i := strings.Index(oidText, "::") + + mibName := oidText[:i] + oidText = oidText[i+2:] + println("mibname: " + mibName) + submask := oidNum + ".1" + node1, err := gosmi.GetNodeByOID(types.OidMustFromString(submask)) + + require.NoError(t, err) + + index := node1.GetIndex() + tagOids := map[string]struct{}{} + var mibPrefix string + + for i := range index { + tagOids[mibPrefix+index[i].Name] = struct{}{} + } + + // grabs all columns from the table + // mimmicks grabbing everything returned from snmptable -Ch -Cl -c public 127.0.0.1 oidFullName + col := node1.GetRow().AsTable().ColumnOrder + fmt.Printf("cols: \t %v \n", col) + for i := range col { + fmt.Printf("cols: \t %v \n", col[i]) + _, isTag := tagOids[mibPrefix+col[i]] + println(isTag) + } + + require.Error(t, err) +} diff --git a/plugins/inputs/snmp/testdata/tabletest/RFC1213 b/plugins/inputs/snmp/testdata/tableMib similarity index 99% rename from plugins/inputs/snmp/testdata/tabletest/RFC1213 rename to plugins/inputs/snmp/testdata/tableMib index 6819d122bb48c..be13c1c1cc510 100644 --- a/plugins/inputs/snmp/testdata/tabletest/RFC1213 +++ b/plugins/inputs/snmp/testdata/tableMib @@ -3,9 +3,9 @@ RFC1213-MIB DEFINITIONS ::= BEGIN IMPORTS mgmt, NetworkAddress, IpAddress, Counter, Gauge, TimeTicks - FROM RFC1155 + FROM RFC1155-SMI OBJECT-TYPE - FROM RFC-1212; + FROM fooImports; -- This MIB module uses the extended OBJECT-TYPE macro as -- defined in [14]; diff --git a/plugins/inputs/snmp/testdata/tabletest/RFC1155 b/plugins/inputs/snmp/testdata/tableMibImports similarity index 98% rename from plugins/inputs/snmp/testdata/tabletest/RFC1155 rename to plugins/inputs/snmp/testdata/tableMibImports index 571667e45dc29..1516e7cbb840f 100644 --- a/plugins/inputs/snmp/testdata/tabletest/RFC1155 +++ b/plugins/inputs/snmp/testdata/tableMibImports @@ -1,4 +1,4 @@ -RFC1155 DEFINITIONS ::= BEGIN +RFC1155-SMI DEFINITIONS ::= BEGIN EXPORTS -- 
EVERYTHING internet, directory, mgmt, From 9318036b9749129ed0d64a80e23fc38eae77c0ae Mon Sep 17 00:00:00 2001 From: MyaLongmire Date: Mon, 11 Oct 2021 13:20:35 -0600 Subject: [PATCH 71/81] fix: removed extra print statements --- plugins/inputs/snmp/snmp_test.go | 86 -------------------------------- 1 file changed, 86 deletions(-) diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index 41b2cc430bd56..02e952807ae7a 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -1,11 +1,8 @@ package snmp import ( - "fmt" "net" - "os" "path/filepath" - "strings" "sync" "testing" "time" @@ -16,8 +13,6 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/testutil" "github.com/influxdata/toml" - "github.com/sleepinggenius2/gosmi" - "github.com/sleepinggenius2/gosmi/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -1163,84 +1158,3 @@ func TestTableJoinNoIndexAsTag_walk(t *testing.T) { assert.Contains(t, tb.Rows, rtr2) assert.Contains(t, tb.Rows, rtr3) } - -func TestGoSmi(t *testing.T) { - gosmi.Init() - Path := []string{"./testdata"} - var folders []string - for _, mibPath := range Path { - gosmi.AppendPath(mibPath) - folders = append(folders, mibPath) - err := filepath.Walk(mibPath, func(path string, info os.FileInfo, err error) error { - if info.Mode()&os.ModeSymlink != 0 { - s, _ := os.Readlink(path) - folders = append(folders, s) - } - return nil - }) - require.NoError(t, err) - for _, folder := range folders { - err := filepath.Walk(folder, func(path string, info os.FileInfo, err error) error { - if info.IsDir() { - gosmi.AppendPath(path) - } else if info.Mode()&os.ModeSymlink == 0 { - gosmi.LoadModule(info.Name()) - //println(load) - } - return nil - }) - require.NoError(t, err) - } - folders = []string{} - } - oid := "RFC1213-MIB::atTable" - - s := strings.Split(oid, "::") - var end string - // node becomes sysUpTime.0 - node := s[1] - 
if strings.ContainsAny(node, ".") { - s = strings.Split(node, ".") - // node becomes sysUpTime - node = s[0] - end = "." + s[1] - } - - out, err := gosmi.GetNode(node) - require.NoError(t, err) - - oidNum := "." + out.RenderNumeric() + end - - println(oidNum) - - oidText := out.RenderQualified() - i := strings.Index(oidText, "::") - - mibName := oidText[:i] - oidText = oidText[i+2:] - println("mibname: " + mibName) - submask := oidNum + ".1" - node1, err := gosmi.GetNodeByOID(types.OidMustFromString(submask)) - - require.NoError(t, err) - - index := node1.GetIndex() - tagOids := map[string]struct{}{} - var mibPrefix string - - for i := range index { - tagOids[mibPrefix+index[i].Name] = struct{}{} - } - - // grabs all columns from the table - // mimmicks grabbing everything returned from snmptable -Ch -Cl -c public 127.0.0.1 oidFullName - col := node1.GetRow().AsTable().ColumnOrder - fmt.Printf("cols: \t %v \n", col) - for i := range col { - fmt.Printf("cols: \t %v \n", col[i]) - _, isTag := tagOids[mibPrefix+col[i]] - println(isTag) - } - - require.Error(t, err) -} From 8ce0077de2fa2867b2e86c877280dcfb2b2a6855 Mon Sep 17 00:00:00 2001 From: MyaLongmire Date: Tue, 12 Oct 2021 08:37:04 -0600 Subject: [PATCH 72/81] fix: removes unused code --- plugins/inputs/snmp/snmp_test.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index 02e952807ae7a..027d2f9970676 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -207,13 +207,6 @@ func TestSnmpInit(t *testing.T) { } func TestSnmpInit_noTranslate(t *testing.T) { - // write another mib - // override execCommand so it returns exec.ErrNotFound - // defer func(ec func(string, ...string) *exec.Cmd) { execCommand = ec }(execCommand) - // execCommand = func(_ string, _ ...string) *exec.Cmd { - // return exec.Command("snmptranslateExecErrNotFound") - // } - s := &Snmp{ Fields: []Field{ {Oid: ".1.1.1.1", Name: "one", 
IsTag: true}, From 8e6a2591cb5769ff7424415f5a0b9dc934fe1c6c Mon Sep 17 00:00:00 2001 From: MyaLongmire Date: Tue, 12 Oct 2021 08:43:55 -0600 Subject: [PATCH 73/81] fix: remove name from noTranslate test --- plugins/inputs/snmp/snmp_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index 027d2f9970676..837980e6fc198 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -218,7 +218,7 @@ func TestSnmpInit_noTranslate(t *testing.T) { Fields: []Field{ {Oid: ".1.1.1.4", Name: "four", IsTag: true}, {Oid: ".1.1.1.5", Name: "five"}, - {Oid: ".1.1.1.6", Name: ".1.1.1.6"}, + {Oid: ".1.1.1.6"}, }}, }, } @@ -247,7 +247,7 @@ func TestSnmpInit_noTranslate(t *testing.T) { assert.Equal(t, false, s.Tables[0].Fields[1].IsTag) assert.Equal(t, ".1.1.1.6", s.Tables[0].Fields[2].Oid) - assert.Equal(t, ".1.1.1.6", s.Tables[0].Fields[2].Name) + assert.Equal(t, "iso", s.Tables[0].Fields[2].Name) assert.Equal(t, false, s.Tables[0].Fields[2].IsTag) } From 8e01068901b4dedc96cb90db603d718c94a733d8 Mon Sep 17 00:00:00 2001 From: MyaLongmire Date: Tue, 12 Oct 2021 08:59:50 -0600 Subject: [PATCH 74/81] fix: renaming --- plugins/inputs/snmp/snmp_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index 837980e6fc198..c68ebae66abde 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -247,7 +247,7 @@ func TestSnmpInit_noTranslate(t *testing.T) { assert.Equal(t, false, s.Tables[0].Fields[1].IsTag) assert.Equal(t, ".1.1.1.6", s.Tables[0].Fields[2].Oid) - assert.Equal(t, "iso", s.Tables[0].Fields[2].Name) + assert.Equal(t, "dod", s.Tables[0].Fields[2].Name) assert.Equal(t, false, s.Tables[0].Fields[2].IsTag) } From a3b1a5ccbf5871d8e16ba034453f5bfc954cd963 Mon Sep 17 00:00:00 2001 From: MyaLongmire Date: Tue, 12 Oct 2021 09:20:22 -0600 Subject: [PATCH 
75/81] fix: changed translate error to warning so telegraf can keep running, added .999 back into tests --- plugins/inputs/snmp/snmp.go | 10 ++++++---- plugins/inputs/snmp/snmp_test.go | 4 +++- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 737f46a48f8e1..f22d008ef38f1 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -4,6 +4,7 @@ import ( "encoding/binary" "errors" "fmt" + "log" "math" "net" "os" @@ -304,8 +305,9 @@ func (f *Field) init(parent *Snmp) error { // check if oid needs translation or name is not set if strings.ContainsAny(f.Oid, ":abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") || f.Name == "" { _, oidNum, oidText, conversion, err := snmpTranslateCall(f.Oid) + //maybe turn this into a warning if err != nil { - return fmt.Errorf("translating: %w", err) + log.Printf("W! [inputs.snmp] %v", err) } f.Oid = oidNum if f.Name == "" { @@ -910,7 +912,7 @@ func snmpTranslateCall(oid string) (mibName string, oidNum string, oidText strin out, err = gosmi.GetNode(node) if err != nil { - return "", "", "", "", err + return oid, oid, oid, "", err } oidNum = "." 
+ out.RenderNumeric() + end @@ -921,7 +923,7 @@ func snmpTranslateCall(oid string) (mibName string, oidNum string, oidText strin if strings.ContainsAny(s[i], "abcdefghijklmnopqrstuvwxyz") { out, err = gosmi.GetNode(s[i]) if err != nil { - return "", "", "", "", err + return oid, oid, oid, "", err } s[i] = out.RenderNumeric() } @@ -933,7 +935,7 @@ func snmpTranslateCall(oid string) (mibName string, oidNum string, oidText strin oidNum = oid // ensure modules are loaded or node will be empty (might not error) if err != nil { - return "", "", "", "", err + return oid, oid, oid, "", err } } diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index c68ebae66abde..dcd2e047858a8 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -139,6 +139,7 @@ func TestFieldInit(t *testing.T) { {"IF-MIB::ifPhysAddress.1", "", "none", ".1.3.6.1.2.1.2.2.1.6.1", "ifPhysAddress.1", "none"}, {"BRIDGE-MIB::dot1dTpFdbAddress.1", "", "", ".1.3.6.1.2.1.17.4.3.1.1.1", "dot1dTpFdbAddress.1", "hwaddr"}, {"TCP-MIB::tcpConnectionLocalAddress.1", "", "", ".1.3.6.1.2.1.6.19.1.2.1", "tcpConnectionLocalAddress.1", "ipaddr"}, + {".999", "", "", ".999", ".999", ""}, } for _, txl := range translations { @@ -159,6 +160,7 @@ func TestTableInit(t *testing.T) { tbl := Table{ Oid: ".1.3.6.1.2.1.3.1", Fields: []Field{ + {Oid: ".999", Name: "foo"}, {Oid: ".1.3.6.1.2.1.3.1.1.1", Name: "ifIndex"}, {Oid: "RFC1213-MIB::atPhysAddress", Name: "atPhysAddress", IsTag: true}, }, @@ -173,7 +175,7 @@ func TestTableInit(t *testing.T) { assert.Equal(t, "atTable", tbl.Name) - assert.Len(t, tbl.Fields, 4) + assert.Len(t, tbl.Fields, 5) assert.Contains(t, tbl.Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.1", Name: "atIfIndex", initialized: true, IsTag: true, snmp: s}) assert.Contains(t, tbl.Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.2", Name: "atPhysAddress", IsTag: true, initialized: true, snmp: s, Conversion: "hwaddr"}) assert.Contains(t, tbl.Fields, Field{Oid: 
".1.3.6.1.2.1.3.1.1.3", Name: "atNetAddress", initialized: true, IsTag: true, snmp: s}) From 7fec6e5714adb0ae7e5650b14d90140ca7ca9d7d Mon Sep 17 00:00:00 2001 From: MyaLongmire Date: Tue, 12 Oct 2021 14:08:55 -0600 Subject: [PATCH 76/81] fix: call getMibsPath only once and start reqorking testTableBuild --- plugins/inputs/snmp/snmp.go | 27 +++----- plugins/inputs/snmp/snmp_test.go | 100 ++++++++++++---------------- plugins/processors/ifname/ifname.go | 2 +- 3 files changed, 54 insertions(+), 75 deletions(-) diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index f22d008ef38f1..4a768620d5f84 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -99,16 +99,21 @@ func (s *Snmp) init() error { return nil } + err := s.getMibsPath() + if err != nil { + return fmt.Errorf("could not get path %v", err) + } + s.connectionCache = make([]snmpConnection, len(s.Agents)) for i := range s.Tables { - if err := s.Tables[i].Init(s); err != nil { + if err := s.Tables[i].Init(); err != nil { return fmt.Errorf("initializing table %s: %w", s.Tables[i].Name, err) } } for i := range s.Fields { - if err := s.Fields[i].init(s); err != nil { + if err := s.Fields[i].init(); err != nil { return fmt.Errorf("initializing field %s: %w", s.Fields[i].Name, err) } } @@ -178,16 +183,10 @@ type Table struct { Oid string initialized bool - snmp *Snmp } // Init() builds & initializes the nested fields. 
-func (t *Table) Init(parent *Snmp) error { - t.snmp = parent - err := t.snmp.getMibsPath() - if err != nil { - return fmt.Errorf("could not get path %v", err) - } +func (t *Table) Init() error { //makes sure oid or name is set in config file //otherwise snmp will produce metrics with an empty name if t.Oid == "" && t.Name == "" { @@ -205,7 +204,7 @@ func (t *Table) Init(parent *Snmp) error { secondaryIndexTablePresent := false // initialize all the nested fields for i := range t.Fields { - if err := t.Fields[i].init(t.snmp); err != nil { + if err := t.Fields[i].init(); err != nil { return fmt.Errorf("initializing field %s: %w", t.Fields[i].Name, err) } if t.Fields[i].SecondaryIndexTable { @@ -293,15 +292,11 @@ type Field struct { } // init() converts OID names to numbers, and sets the .Name attribute if unset. -func (f *Field) init(parent *Snmp) error { - f.snmp = parent +func (f *Field) init() error { if f.initialized { return nil } - err := f.snmp.getMibsPath() - if err != nil { - return fmt.Errorf("could not get path %v", err) - } + // check if oid needs translation or name is not set if strings.ContainsAny(f.Oid, ":abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") || f.Name == "" { _, oidNum, oidText, conversion, err := snmpTranslateCall(f.Oid) diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index dcd2e047858a8..93c34e249373e 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -120,6 +120,9 @@ func TestFieldInit(t *testing.T) { }, } + err = s.init() + require.NoError(t, err) + translations := []struct { inputOid string inputName string @@ -143,8 +146,8 @@ func TestFieldInit(t *testing.T) { } for _, txl := range translations { - f := Field{Oid: txl.inputOid, Name: txl.inputName, Conversion: txl.inputConversion, snmp: s} - err := f.init(f.snmp) + f := Field{Oid: txl.inputOid, Name: txl.inputName, Conversion: txl.inputConversion} + err := f.init() if !assert.NoError(t, err, "inputOid='%s' 
inputName='%s'", txl.inputOid, txl.inputName) { continue } @@ -170,15 +173,18 @@ func TestTableInit(t *testing.T) { Path: []string{testDataPath}, }, } - err = tbl.Init(s) + err = s.init() + require.NoError(t, err) + + err = tbl.Init() require.NoError(t, err) assert.Equal(t, "atTable", tbl.Name) assert.Len(t, tbl.Fields, 5) - assert.Contains(t, tbl.Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.1", Name: "atIfIndex", initialized: true, IsTag: true, snmp: s}) - assert.Contains(t, tbl.Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.2", Name: "atPhysAddress", IsTag: true, initialized: true, snmp: s, Conversion: "hwaddr"}) - assert.Contains(t, tbl.Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.3", Name: "atNetAddress", initialized: true, IsTag: true, snmp: s}) + assert.Contains(t, tbl.Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.1", Name: "atIfIndex", initialized: true, IsTag: true}) + assert.Contains(t, tbl.Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.2", Name: "atPhysAddress", IsTag: true, initialized: true, Conversion: "hwaddr"}) + assert.Contains(t, tbl.Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.3", Name: "atNetAddress", initialized: true, IsTag: true}) } func TestSnmpInit(t *testing.T) { @@ -623,77 +629,64 @@ func TestGosnmpWrapper_get_retry(t *testing.T) { } func TestTableBuild_walk(t *testing.T) { + testDataPath, err := filepath.Abs("./testdata") + require.NoError(t, err) + s := &Snmp{ + ClientConfig: snmp.ClientConfig{ + Path: []string{testDataPath}, + }, + } + err = s.init() + require.NoError(t, err) + tbl := Table{ - Name: "mytable", + Name: "atTable", IndexAsTag: true, Fields: []Field{ { - Name: "myfield1", - Oid: ".1.0.0.0.1.1", + Name: "ifIndex", + Oid: "1.3.6.1.2.1.3.1.1.1", IsTag: true, }, { - Name: "myfield2", - Oid: ".1.0.0.0.1.2", + Name: "atPhysAddress", + Oid: "1.3.6.1.2.1.3.1.1.2", + Translate: true, }, { - Name: "myfield3", - Oid: ".1.0.0.0.1.3", + Name: "atNetAddress", + Oid: "1.3.6.1.2.1.3.1.1.3", Conversion: "float", }, - { - Name: "myfield4", - Oid: ".1.0.0.2.1.5", - OidIndexSuffix: 
".9.9", - }, - { - Name: "myfield5", - Oid: ".1.0.0.2.1.5", - OidIndexLength: 1, - }, - // { - // Name: "myfield6", - // Oid: ".1.0.0.0.1.6", - // Translate: true, - // }, - { - Name: "myfield7", - Oid: ".1.0.0.0.1.6", - Translate: false, - }, }, } + err = tbl.Init() + require.NoError(t, err) tb, err := tbl.Build(tsc, true) require.NoError(t, err) - assert.Equal(t, tb.Name, "mytable") + assert.Equal(t, tb.Name, "atTable") rtr1 := RTableRow{ Tags: map[string]string{ - "myfield1": "foo", - "index": "0", + "ifIndex": "foo", + "index": "0", }, Fields: map[string]interface{}{ - "myfield2": 1, - "myfield3": float64(0.123), - "myfield4": 11, - "myfield5": 11, + "atPhysAddress": 1, + "atNetAddress": "testTableEntry.7", // this fails as Build calls snmpTranslate and this is not a real mib so traslate fails // "myfield6": "testTableEntry.7", - "myfield7": ".1.0.0.0.1.7", }, } rtr2 := RTableRow{ Tags: map[string]string{ - "myfield1": "bar", - "index": "1", + "ifIndex": "bar", + "index": "1", }, Fields: map[string]interface{}{ - "myfield2": 2, - "myfield3": float64(0.456), - "myfield4": 22, - "myfield5": 22, + "atPhysAddress": 2, }, } rtr3 := RTableRow{ @@ -701,23 +694,14 @@ func TestTableBuild_walk(t *testing.T) { "index": "2", }, Fields: map[string]interface{}{ - "myfield2": 0, - "myfield3": float64(0.0), - }, - } - rtr4 := RTableRow{ - Tags: map[string]string{ - "index": "3", - }, - Fields: map[string]interface{}{ - "myfield3": float64(9.999), + "atPhysAddress": 0, }, } + assert.Len(t, tb.Rows, 4) assert.Contains(t, tb.Rows, rtr1) assert.Contains(t, tb.Rows, rtr2) assert.Contains(t, tb.Rows, rtr3) - assert.Contains(t, tb.Rows, rtr4) } func TestTableBuild_noWalk(t *testing.T) { diff --git a/plugins/processors/ifname/ifname.go b/plugins/processors/ifname/ifname.go index 6a7a3fb3ba09b..10623c041dd2d 100644 --- a/plugins/processors/ifname/ifname.go +++ b/plugins/processors/ifname/ifname.go @@ -356,7 +356,7 @@ func makeTableNoMock(fieldName string) (*si.Table, error) { }, } - 
err = tab.Init(&si.Snmp{}) + err = tab.Init() if err != nil { //Init already wraps return nil, err From 41475d87f7959b7c78b4e3054179611ba31021ad Mon Sep 17 00:00:00 2001 From: MyaLongmire Date: Tue, 12 Oct 2021 16:34:42 -0600 Subject: [PATCH 77/81] fix: changed tableBuildWalk test to use a real mib added oids to interface table --- plugins/inputs/snmp/snmp.go | 6 ++- plugins/inputs/snmp/snmp_test.go | 78 ++++++++++++++++------------- plugins/inputs/snmp/testdata/server | 2 +- 3 files changed, 48 insertions(+), 38 deletions(-) diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 4a768620d5f84..155deb325f2f9 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -288,7 +288,6 @@ type Field struct { SecondaryOuterJoin bool initialized bool - snmp *Snmp } // init() converts OID names to numbers, and sets the .Name attribute if unset. @@ -520,6 +519,7 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) { } else { err := gs.Walk(oid, func(ent gosnmp.SnmpPDU) error { if len(ent.Name) <= len(oid) || ent.Name[:len(oid)+1] != oid+"." 
{ + fmt.Printf("%v\n", &walkError{}) return &walkError{} // break the walk } @@ -547,10 +547,14 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) { // snmptranslate table field value here if f.Translate { if entOid, ok := ent.Value.(string); ok { + fmt.Printf("%v \n", entOid) _, _, oidText, _, err := snmpTranslateCall(entOid) + fmt.Printf("%v \n", err) + fmt.Printf("%v\n", ent.Value) if err == nil { // If no error translating, the original value for ent.Value should be replaced ent.Value = oidText + fmt.Printf("%v\n", ent.Value) } } } diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index 93c34e249373e..5bc25d6410b79 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -61,33 +61,42 @@ func (tsc *testSNMPConnection) Walk(oid string, wf gosnmp.WalkFunc) error { var tsc = &testSNMPConnection{ host: "tsc", values: map[string]interface{}{ - ".1.0.0.0.1.1.0": "foo", - ".1.0.0.0.1.1.1": []byte("bar"), - ".1.0.0.0.1.1.2": []byte(""), - ".1.0.0.0.1.102": "bad", - ".1.0.0.0.1.2.0": 1, - ".1.0.0.0.1.2.1": 2, - ".1.0.0.0.1.2.2": 0, - ".1.0.0.0.1.3.0": "0.123", - ".1.0.0.0.1.3.1": "0.456", - ".1.0.0.0.1.3.2": "0.000", - ".1.0.0.0.1.3.3": "9.999", - ".1.0.0.0.1.5.0": 123456, - ".1.0.0.1.1": "baz", - ".1.0.0.1.2": 234, - ".1.0.0.1.3": []byte("byte slice"), - ".1.0.0.2.1.5.0.9.9": 11, - ".1.0.0.2.1.5.1.9.9": 22, - ".1.0.0.0.1.6.0": ".1.0.0.0.1.7", - ".1.0.0.3.1.1.10": "instance", - ".1.0.0.3.1.1.11": "instance2", - ".1.0.0.3.1.1.12": "instance3", - ".1.0.0.3.1.2.10": 10, - ".1.0.0.3.1.2.11": 20, - ".1.0.0.3.1.2.12": 20, - ".1.0.0.3.1.3.10": 1, - ".1.0.0.3.1.3.11": 2, - ".1.0.0.3.1.3.12": 3, + ".1.3.6.1.2.1.3.1.1.1.0": "foo", + ".1.3.6.1.2.1.3.1.1.1.1": []byte("bar"), + ".1.3.6.1.2.1.3.1.1.1.2": []byte(""), + ".1.3.6.1.2.1.3.1.1.102": "bad", + ".1.3.6.1.2.1.3.1.1.2.0": 1, + ".1.3.6.1.2.1.3.1.1.2.1": 2, + ".1.3.6.1.2.1.3.1.1.2.2": 0, + ".1.3.6.1.2.1.3.1.1.3.0": "1.3.6.1.2.1.3.1.1.3", + 
".1.3.6.1.2.1.3.1.1.5.0": 123456, + ".1.0.0.0.1.1.0": "foo", + ".1.0.0.0.1.1.1": []byte("bar"), + ".1.0.0.0.1.1.2": []byte(""), + ".1.0.0.0.1.102": "bad", + ".1.0.0.0.1.2.0": 1, + ".1.0.0.0.1.2.1": 2, + ".1.0.0.0.1.2.2": 0, + ".1.0.0.0.1.3.0": "0.123", + ".1.0.0.0.1.3.1": "0.456", + ".1.0.0.0.1.3.2": "0.000", + ".1.0.0.0.1.3.3": "9.999", + ".1.0.0.0.1.5.0": 123456, + ".1.0.0.1.1": "baz", + ".1.0.0.1.2": 234, + ".1.0.0.1.3": []byte("byte slice"), + ".1.0.0.2.1.5.0.9.9": 11, + ".1.0.0.2.1.5.1.9.9": 22, + ".1.0.0.0.1.6.0": ".1.0.0.0.1.7", + ".1.0.0.3.1.1.10": "instance", + ".1.0.0.3.1.1.11": "instance2", + ".1.0.0.3.1.1.12": "instance3", + ".1.0.0.3.1.2.10": 10, + ".1.0.0.3.1.2.11": 20, + ".1.0.0.3.1.2.12": 20, + ".1.0.0.3.1.3.10": 1, + ".1.0.0.3.1.3.11": 2, + ".1.0.0.3.1.3.12": 3, }, } @@ -210,7 +219,6 @@ func TestSnmpInit(t *testing.T) { Oid: ".1.0.0.1.1", Name: "hostname", initialized: true, - snmp: s, }, s.Fields[0]) } @@ -651,12 +659,12 @@ func TestTableBuild_walk(t *testing.T) { { Name: "atPhysAddress", Oid: "1.3.6.1.2.1.3.1.1.2", - Translate: true, + Translate: false, }, { - Name: "atNetAddress", - Oid: "1.3.6.1.2.1.3.1.1.3", - Conversion: "float", + Name: "atNetAddress", + Oid: "1.3.6.1.2.1.3.1.1.3", + Translate: true, }, }, } @@ -675,9 +683,7 @@ func TestTableBuild_walk(t *testing.T) { }, Fields: map[string]interface{}{ "atPhysAddress": 1, - "atNetAddress": "testTableEntry.7", - // this fails as Build calls snmpTranslate and this is not a real mib so traslate fails - // "myfield6": "testTableEntry.7", + "atNetAddress": "atNetAddress", }, } rtr2 := RTableRow{ @@ -698,7 +704,7 @@ func TestTableBuild_walk(t *testing.T) { }, } - assert.Len(t, tb.Rows, 4) + assert.Len(t, tb.Rows, 3) assert.Contains(t, tb.Rows, rtr1) assert.Contains(t, tb.Rows, rtr2) assert.Contains(t, tb.Rows, rtr3) diff --git a/plugins/inputs/snmp/testdata/server b/plugins/inputs/snmp/testdata/server index 366b29e3d26dc..4f97618d62ef3 100644 --- a/plugins/inputs/snmp/testdata/server +++ 
b/plugins/inputs/snmp/testdata/server @@ -52,6 +52,6 @@ description OBJECT-TYPE STATUS current DESCRIPTION "server mib for testing" - ::= { testMIBObjects 4 } + ::= { testMIBObjects 4 } END \ No newline at end of file From d0443b0bc1dc86a071d79aa8f87ad2a364808a3b Mon Sep 17 00:00:00 2001 From: MyaLongmire Date: Tue, 12 Oct 2021 16:38:55 -0600 Subject: [PATCH 78/81] chore: removed leftover print statements --- plugins/inputs/snmp/snmp.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 155deb325f2f9..3e8ff03dbbb07 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -547,14 +547,10 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) { // snmptranslate table field value here if f.Translate { if entOid, ok := ent.Value.(string); ok { - fmt.Printf("%v \n", entOid) _, _, oidText, _, err := snmpTranslateCall(entOid) - fmt.Printf("%v \n", err) - fmt.Printf("%v\n", ent.Value) if err == nil { // If no error translating, the original value for ent.Value should be replaced ent.Value = oidText - fmt.Printf("%v\n", ent.Value) } } } From b3c1543a736cf65787dcfbf46ccdfdaced046aa9 Mon Sep 17 00:00:00 2001 From: MyaLongmire Date: Tue, 12 Oct 2021 16:42:34 -0600 Subject: [PATCH 79/81] chore: removed leftover print statements --- plugins/inputs/snmp/snmp.go | 1 - 1 file changed, 1 deletion(-) diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 3e8ff03dbbb07..8843e9f90e18f 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -519,7 +519,6 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) { } else { err := gs.Walk(oid, func(ent gosnmp.SnmpPDU) error { if len(ent.Name) <= len(oid) || ent.Name[:len(oid)+1] != oid+"." 
{ - fmt.Printf("%v\n", &walkError{}) return &walkError{} // break the walk } From 48b0a3b3a56544b0d452b069953727fe78abf63d Mon Sep 17 00:00:00 2001 From: MyaLongmire Date: Tue, 12 Oct 2021 16:45:13 -0600 Subject: [PATCH 80/81] fis: trailing name in test --- plugins/inputs/snmp/snmp_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index 5bc25d6410b79..843a85c9e36ea 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -227,7 +227,7 @@ func TestSnmpInit_noTranslate(t *testing.T) { Fields: []Field{ {Oid: ".1.1.1.1", Name: "one", IsTag: true}, {Oid: ".1.1.1.2", Name: "two"}, - {Oid: ".1.1.1.3", Name: ".1.1.1.3"}, + {Oid: ".1.1.1.3"}, }, Tables: []Table{ {Name: "testing", @@ -251,7 +251,7 @@ func TestSnmpInit_noTranslate(t *testing.T) { assert.Equal(t, false, s.Fields[1].IsTag) assert.Equal(t, ".1.1.1.3", s.Fields[2].Oid) - assert.Equal(t, ".1.1.1.3", s.Fields[2].Name) + assert.Equal(t, "iso", s.Fields[2].Name) assert.Equal(t, false, s.Fields[2].IsTag) assert.Equal(t, ".1.1.1.4", s.Tables[0].Fields[0].Oid) From 610885959b0984278fa373842437b093d0e68241 Mon Sep 17 00:00:00 2001 From: MyaLongmire Date: Tue, 12 Oct 2021 17:23:32 -0600 Subject: [PATCH 81/81] fix: added .9 in front of oids in testSnmpInit_noTranslate so they don't match prewritten mibs --- plugins/inputs/snmp/snmp.go | 8 ++-- plugins/inputs/snmp/snmp_test.go | 36 +++++++++------- plugins/inputs/snmp/testdata/foo | 3 ++ plugins/inputs/snmp/testdata/tableBuild | 57 +++++++++++++++++++++++++ 4 files changed, 85 insertions(+), 19 deletions(-) create mode 100644 plugins/inputs/snmp/testdata/tableBuild diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 8843e9f90e18f..303aa8dd6c7f4 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -906,7 +906,7 @@ func snmpTranslateCall(oid string) (mibName string, oidNum string, oidText strin out, 
err = gosmi.GetNode(node) if err != nil { - return oid, oid, oid, "", err + return oid, oid, oid, oid, err } oidNum = "." + out.RenderNumeric() + end @@ -917,7 +917,7 @@ func snmpTranslateCall(oid string) (mibName string, oidNum string, oidText strin if strings.ContainsAny(s[i], "abcdefghijklmnopqrstuvwxyz") { out, err = gosmi.GetNode(s[i]) if err != nil { - return oid, oid, oid, "", err + return oid, oid, oid, oid, err } s[i] = out.RenderNumeric() } @@ -929,7 +929,7 @@ func snmpTranslateCall(oid string) (mibName string, oidNum string, oidText strin oidNum = oid // ensure modules are loaded or node will be empty (might not error) if err != nil { - return oid, oid, oid, "", err + return oid, oid, oid, oid, err } } @@ -952,7 +952,7 @@ func snmpTranslateCall(oid string) (mibName string, oidNum string, oidText strin oidText = out.RenderQualified() i := strings.Index(oidText, "::") if i == -1 { - return "", oid, oid, "", fmt.Errorf("not found") + return "", oid, oid, oid, fmt.Errorf("not found") } mibName = oidText[:i] oidText = oidText[i+2:] + end diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index 843a85c9e36ea..64f5aeb194286 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -223,47 +223,53 @@ func TestSnmpInit(t *testing.T) { } func TestSnmpInit_noTranslate(t *testing.T) { + testDataPath, err := filepath.Abs("./testdata") + require.NoError(t, err) + s := &Snmp{ Fields: []Field{ - {Oid: ".1.1.1.1", Name: "one", IsTag: true}, - {Oid: ".1.1.1.2", Name: "two"}, - {Oid: ".1.1.1.3"}, + {Oid: ".9.1.1.1.1", Name: "one", IsTag: true}, + {Oid: ".9.1.1.1.2", Name: "two"}, + {Oid: ".9.1.1.1.3"}, }, Tables: []Table{ {Name: "testing", Fields: []Field{ - {Oid: ".1.1.1.4", Name: "four", IsTag: true}, - {Oid: ".1.1.1.5", Name: "five"}, - {Oid: ".1.1.1.6"}, + {Oid: ".9.1.1.1.4", Name: "four", IsTag: true}, + {Oid: ".9.1.1.1.5", Name: "five"}, + {Oid: ".9.1.1.1.6"}, }}, }, + ClientConfig: snmp.ClientConfig{ + 
Path: []string{testDataPath}, + }, } - err := s.init() + err = s.init() require.NoError(t, err) - assert.Equal(t, ".1.1.1.1", s.Fields[0].Oid) + assert.Equal(t, ".9.1.1.1.1", s.Fields[0].Oid) assert.Equal(t, "one", s.Fields[0].Name) assert.Equal(t, true, s.Fields[0].IsTag) - assert.Equal(t, ".1.1.1.2", s.Fields[1].Oid) + assert.Equal(t, ".9.1.1.1.2", s.Fields[1].Oid) assert.Equal(t, "two", s.Fields[1].Name) assert.Equal(t, false, s.Fields[1].IsTag) - assert.Equal(t, ".1.1.1.3", s.Fields[2].Oid) - assert.Equal(t, "iso", s.Fields[2].Name) + assert.Equal(t, ".9.1.1.1.3", s.Fields[2].Oid) + assert.Equal(t, ".9.1.1.1.3", s.Fields[2].Name) assert.Equal(t, false, s.Fields[2].IsTag) - assert.Equal(t, ".1.1.1.4", s.Tables[0].Fields[0].Oid) + assert.Equal(t, ".9.1.1.1.4", s.Tables[0].Fields[0].Oid) assert.Equal(t, "four", s.Tables[0].Fields[0].Name) assert.Equal(t, true, s.Tables[0].Fields[0].IsTag) - assert.Equal(t, ".1.1.1.5", s.Tables[0].Fields[1].Oid) + assert.Equal(t, ".9.1.1.1.5", s.Tables[0].Fields[1].Oid) assert.Equal(t, "five", s.Tables[0].Fields[1].Name) assert.Equal(t, false, s.Tables[0].Fields[1].IsTag) - assert.Equal(t, ".1.1.1.6", s.Tables[0].Fields[2].Oid) - assert.Equal(t, "dod", s.Tables[0].Fields[2].Name) + assert.Equal(t, ".9.1.1.1.6", s.Tables[0].Fields[2].Oid) + assert.Equal(t, ".9.1.1.1.6", s.Tables[0].Fields[2].Name) assert.Equal(t, false, s.Tables[0].Fields[2].IsTag) } diff --git a/plugins/inputs/snmp/testdata/foo b/plugins/inputs/snmp/testdata/foo index f2e4c0b700f80..4e9bf7f9d16f9 100644 --- a/plugins/inputs/snmp/testdata/foo +++ b/plugins/inputs/snmp/testdata/foo @@ -15,6 +15,9 @@ fooTestMIB MODULE-IDENTITY ::= { iso 1 } fooMIBObjects OBJECT IDENTIFIER ::= { iso 2 } +fooOne OBJECT IDENTIFIER ::= { iso 1 } +six OBJECT IDENTIFIER ::= { fooOne 1 } +three OBJECT IDENTIFIER ::= { six 3 } foo OBJECT-TYPE SYNTAX Integer32 diff --git a/plugins/inputs/snmp/testdata/tableBuild b/plugins/inputs/snmp/testdata/tableBuild new file mode 100644 index 
0000000000000..0551bfd6dd1d4 --- /dev/null +++ b/plugins/inputs/snmp/testdata/tableBuild @@ -0,0 +1,57 @@ +TEST DEFINITIONS ::= BEGIN + +IMPORTS + MODULE-IDENTITY, OBJECT-TYPE, Integer32 FROM fooImports; + +TestMIB MODULE-IDENTITY + LAST-UPDATED "2021090800Z" + ORGANIZATION "influx" + CONTACT-INFO + "EMail: influx@email.com" + DESCRIPTION + "MIB module for testing snmp plugin + for telegraf + " + ::= { iso 1 } + +testingObjects OBJECT IDENTIFIER ::= { iso 0 } +testObjects OBJECT IDENTIFIER ::= { testingObjects 0 } +hostnameone OBJECT IDENTIFIER ::= {testObjects 1 } +hostname OBJECT IDENTIFIER ::= { hostnameone 1 } +testTable OBJECT IDENTIFIER ::= { testObjects 0 } +testMIBObjects OBJECT IDENTIFIER ::= { testTable 1 } + + +myfield1 OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "server mib for testing" + ::= { testMIBObjects 1 } + +myfield2 OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "server mib for testing" + ::= { testMIBObjects 2 } + +myfield3 OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "server mib for testing" + ::= { testMIBObjects 3 } + +myfield4 OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "server mib for testing" + ::= { testMIBObjects 4 } + +END \ No newline at end of file