diff --git a/CHANGELOG.md b/CHANGELOG.md
index c9f2c3750a37f..4b325b647bddc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -10,10 +10,15 @@
 - [file](./plugins/inputs/file/README.md) - Contributed by @maxunt
 - [tengine](./plugins/inputs/tengine/README.md) - Contributed by @ertaoxu
+- [x509_cert](./plugins/inputs/x509_cert/README.md) - Contributed by @jtyr
+- [filecount](./plugins/inputs/filecount/README.md) - Contributed by @sometimesfood
+- [pgbouncer](./plugins/inputs/pgbouncer/README.md) - Contributed by @nerzhul
+- [activemq](./plugins/inputs/activemq/README.md) - Contributed by @mlabouardy
 
 ### New Processors
 
 - [enum](./plugins/processors/enum/README.md) - Contributed by @KarstenSchnitter
+- [rename](./plugins/processors/rename/README.md) - Contributed by @goldibex
 
 ### New Aggregators
 
@@ -45,6 +50,44 @@
 - [#4320](https://github.com/influxdata/telegraf/pull/4320): Improve cloudwatch output performance.
 - [#3768](https://github.com/influxdata/telegraf/pull/3768): Add x509_cert input plugin.
 - [#4471](https://github.com/influxdata/telegraf/pull/4471): Add IPSIpAddress syntax to ipaddr conversion in snmp plugin.
+- [#4363](https://github.com/influxdata/telegraf/pull/4363): Add filecount input plugin.
+- [#4485](https://github.com/influxdata/telegraf/pull/4485): Add support for configuring an AWS endpoint_url.
+- [#4491](https://github.com/influxdata/telegraf/pull/4491): Send all messages before waiting for results in kafka output.
+- [#4492](https://github.com/influxdata/telegraf/pull/4492): Add support for lz4 compression to kafka output.
+- [#4450](https://github.com/influxdata/telegraf/pull/4450): Split multiple sensor keys in ipmi input.
+- [#4364](https://github.com/influxdata/telegraf/pull/4364): Support StatisticValues in cloudwatch output plugin.
+- [#4431](https://github.com/influxdata/telegraf/pull/4431): Add ip restriction for the prometheus_client output.
+- [#3918](https://github.com/influxdata/telegraf/pull/3918): Add pgbouncer input plugin.
+- [#2689](https://github.com/influxdata/telegraf/pull/2689): Add ActiveMQ input plugin.
+- [#4402](https://github.com/influxdata/telegraf/pull/4402): Add wavefront parser.
+- [#4528](https://github.com/influxdata/telegraf/pull/4528): Add rename processor.
+- [#4537](https://github.com/influxdata/telegraf/pull/4537): Add message 'max_bytes' configuration to kafka input.
+- [#4546](https://github.com/influxdata/telegraf/pull/4546): Add gopsutil meminfo fields to mem plugin.
+- [#4285](https://github.com/influxdata/telegraf/pull/4285): Document how to parse telegraf logs.
+- [#4542](https://github.com/influxdata/telegraf/pull/4542): Use dep v0.5.0.
+- [#4433](https://github.com/influxdata/telegraf/pull/4433): Add ability to set measurement from matched text in grok parser.
+- [#4565](https://github.com/influxdata/telegraf/pull/4565): Drop message batches in kafka output if too large.
+- [#4579](https://github.com/influxdata/telegraf/pull/4579): Add support for static and random routing keys in kafka output.
+- [#4539](https://github.com/influxdata/telegraf/pull/4539): Add logfmt parser.
+
+## v1.7.4 [unreleased]
+
+### Bugfixes
+
+- [#4534](https://github.com/influxdata/telegraf/pull/4534): Skip unserializable metric in InfluxDB UDP output.
+- [#4554](https://github.com/influxdata/telegraf/pull/4554): Fix powerdns input tests.
+
+## v1.7.3 [2018-08-07]
+
+### Bugfixes
+
+- [#4434](https://github.com/influxdata/telegraf/issues/4434): Reduce required docker API version.
+- [#4498](https://github.com/influxdata/telegraf/pull/4498): Keep leading whitespace for messages in syslog input. +- [#4470](https://github.com/influxdata/telegraf/issues/4470): Skip bad entries on interrupt input. +- [#4501](https://github.com/influxdata/telegraf/issues/4501): Preserve metric type when using filters in output plugins. +- [#3794](https://github.com/influxdata/telegraf/issues/3794): Fix error message if URL is unparseable in influxdb output. +- [#4059](https://github.com/influxdata/telegraf/issues/4059): Use explicit zpool properties to fix parse error on FreeBSD 11.2. +- [#4514](https://github.com/influxdata/telegraf/pull/4514): Lock buffer when adding metrics. ## v1.7.2 [2018-07-18] diff --git a/Gopkg.lock b/Gopkg.lock index 4a70b057dd88c..ef76419baae11 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -3,14 +3,14 @@ [[projects]] branch = "master" - digest = "1:d7582b4af1b0b953ff2bb9573a50f787c7e1669cb148fb086a3d1c670a1ac955" + digest = "1:fc0802104acded1f48e4860a9f2db85b82b4a754fca9eae750ff4e8b8cdf2116" name = "code.cloudfoundry.org/clock" packages = ["."] pruneopts = "" revision = "02e53af36e6c978af692887ed449b74026d76fec" [[projects]] - digest = "1:ce7dc0f1ffcd9a2aacc50ae6d322eebff8f4faa2d6c5f445c874cd0b77a63de7" + digest = "1:ca3acef20fd660d4df327accbf3ca2df9a12213d914f3113305dcd56579324b9" name = "collectd.org" packages = [ "api", @@ -23,7 +23,7 @@ [[projects]] branch = "master" - digest = "1:c1269bfaddefd090935401c291ad5df6c03de605a440e941ecc568e19f0f9e3b" + digest = "1:298712a3ee36b59c3ca91f4183bd75d174d5eaa8b4aed5072831f126e2e752f6" name = "github.com/Microsoft/ApplicationInsights-Go" packages = [ "appinsights", @@ -33,7 +33,7 @@ revision = "d2df5d440eda5372f24fcac03839a64d6cb5f7e5" [[projects]] - digest = "1:ec6a42cd98d70f0916216d8f6df8ca61145edeaad041014aa9c003068de7364c" + digest = "1:45ec6eb579713a01991ad07f538fed3b576ee55f5ce9f248320152a9270d9258" name = "github.com/Microsoft/go-winio" packages = ["."] pruneopts = "" @@ -41,7 +41,7 @@ version = "v0.4.9" [[projects]] - digest = "1:14af5ba5ac88efec490fb59734df34e1bd973198caefa7b0cceed0900ef6164c" + digest = "1:9362b2212139b7821f73a86169bf80ce6b0264956f87d82ab3aeedb2b5c08fea" name = "github.com/Shopify/sarama" packages = ["."] pruneopts = "" @@ -57,7 +57,7 @@ version = "1.0.0" [[projects]] - digest = "1:855af787df6b733016849082d66ffda5e0e00856513fcac08a7cf199a23515c2" + digest = "1:f296e8b29c60c94efed3b8cfae08d793cb95149cdd7343e6a9834b4ac7136475" name = "github.com/aerospike/aerospike-client-go" packages = [ ".", @@ -78,7 +78,7 @@ [[projects]] branch = "master" - digest = "1:1399282ad03ac819f0e8a747c888407c5c98bb497d33821a7047c7bae667ede0" + digest = "1:a74730e052a45a3fab1d310fdef2ec17ae3d6af16228421e238320846f2aaec8" name = "github.com/alecthomas/template" packages = [ ".", @@ -97,7 +97,7 @@ [[projects]] branch = "master" - digest = "1:072692f8d76356228f31f64ca3140041a140011c7dea26e746206e8649c71b31" + digest = "1:7f21a8f175ee7f91c659f919c61032e11889fba5dc25c0cec555087cbb87435a" name = "github.com/amir/raidman" packages = [ ".", @@ -108,14 +108,14 @@ [[projects]] branch = "master" - digest = "1:83a67d925714169fa5121021abef0276605c6e4d51c467dd1f0c04344abad1ff" + digest = "1:0828d8c0f95689f832cf348fe23827feb7640cd698d612ef59e2f9d041f54c68" name = "github.com/apache/thrift" packages = ["lib/go/thrift"] pruneopts = "" revision = "f2867c24984aa53edec54a138c03db934221bdea" [[projects]] - digest = "1:ca172b51bfe0a1ae7725dc782339fed4ba697dcd44e29a0a1c765fffdbf05ddc" + digest = 
"1:65a05bde9b02f645c73afa61c9f6af92d94d726c81a268f45cc70218bd58de65" name = "github.com/aws/aws-sdk-go" packages = [ "aws", @@ -156,14 +156,14 @@ [[projects]] branch = "master" - digest = "1:fca298802a2ab834d6eb0e284788ae037ebc324c0f325ff92c5eea592d189cc5" + digest = "1:c0bec5f9b98d0bc872ff5e834fac186b807b656683bd29cb82fb207a1513fabb" name = "github.com/beorn7/perks" packages = ["quantile"] pruneopts = "" revision = "3a771d992973f24aa725d07868b467d1ddfceafb" [[projects]] - digest = "1:0edb96edcfeee9aeba92e605536fbb1542b0bf6a10cea9d0b5a2227d5a703eae" + digest = "1:c5978131c797af795972c27c25396c81d1bf53b7b6e8e3e0259e58375765c071" name = "github.com/bsm/sarama-cluster" packages = ["."] pruneopts = "" @@ -180,7 +180,7 @@ [[projects]] branch = "master" - digest = "1:65ae2d1625584ba8d16d1e15b25db1fc62334e2040f22dbbbdc7531c909843b2" + digest = "1:298e42868718da06fc0899ae8fdb99c48a14477045234c9274d81caa79af6a8f" name = "github.com/couchbase/go-couchbase" packages = ["."] pruneopts = "" @@ -188,7 +188,7 @@ [[projects]] branch = "master" - digest = "1:5db54de7054c072f47806c91ef7625ffa00489ca2da5fbc6ca1c78e08018f6bf" + digest = "1:c734658274a6be88870a36742fdea96a3fce4fc99a7b90946c9e84335ceae71a" name = "github.com/couchbase/gomemcached" packages = [ ".", @@ -199,7 +199,7 @@ [[projects]] branch = "master" - digest = "1:0deaa0f28c823119725c8308703f019797bc077e251d1ed3f2b8eae2cc7791d7" + digest = "1:c1195c02bc8fbf5307cfb95bc79eddaa1351ee3587cc4a7bbe6932e2fb966ff2" name = "github.com/couchbase/goutils" packages = [ "logging", @@ -209,15 +209,7 @@ revision = "e865a1461c8ac0032bd37e2d4dab3289faea3873" [[projects]] - branch = "master" - digest = "1:4c015b7445aa37becc220fde9bdbc4d4329f75af72ca1c98f9b0bd698d6068cb" - name = "github.com/crewjam/rfc5424" - packages = ["."] - pruneopts = "" - revision = "6ae4b209c3f0d5071494be6b883a1970acadda94" - -[[projects]] - digest = "1:0a39ec8bf5629610a4bc7873a92039ee509246da3cef1a0ea60f1ed7e5f9cea5" + digest = "1:56c130d885a4aacae1dd9c7b71cfe39912c7ebc1ff7d2b46083c8812996dc43b" name = "github.com/davecgh/go-spew" packages = ["spew"] pruneopts = "" @@ -225,7 +217,7 @@ version = "v1.1.0" [[projects]] - digest = "1:2426da75f49e5b8507a6ed5d4c49b06b2ff795f4aec401c106b7db8fb2625cd7" + digest = "1:6098222470fe0172157ce9bbef5d2200df4edde17ee649c5d6e48330e4afa4c6" name = "github.com/dgrijalva/jwt-go" packages = ["."] pruneopts = "" @@ -233,7 +225,7 @@ version = "v3.2.0" [[projects]] - digest = "1:68df19ee476d93359596377b7437bbe49d233fe014becd060ded757aeed531cd" + digest = "1:522eff2a1f014a64fb403db60fc0110653e4dc5b59779894d208e697b0708ddc" name = "github.com/docker/distribution" packages = [ "digestset", @@ -243,7 +235,7 @@ revision = "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c" [[projects]] - digest = "1:a21509491bfd5bd1f99abe1d38430fddd16c8c8dc0092f954e224b93ad87f06b" + digest = "1:d149605f1b00713fdc48150122892d77d49d30c825f690dd92f497aeb6cf18f5" name = "github.com/docker/docker" packages = [ "api", @@ -268,7 +260,7 @@ revision = "ed7b6428c133e7c59404251a09b7d6b02fa83cc2" [[projects]] - digest = "1:5b20afc76a36d3994194e2612e83b51bc2b12db3d4d2a722b24474b2d0e3a890" + digest = "1:a5ecc2e70260a87aa263811281465a5effcfae8a54bac319cee87c4625f04d63" name = "github.com/docker/go-connections" packages = [ "nat", @@ -288,7 +280,7 @@ version = "v0.3.3" [[projects]] - digest = "1:7bbb118aeef9a6b9fef3d57b6cc5378f7cd6e915cabf4dea695e318e1a1bd4e6" + digest = "1:6d6672f85a84411509885eaa32f597577873de00e30729b9bb0eb1e1faa49c12" name = "github.com/eapache/go-resiliency" packages = ["breaker"] 
pruneopts = "" @@ -297,7 +289,7 @@ [[projects]] branch = "master" - digest = "1:7b28f7f7c9fb914b30dff111fb910d49bd61d275101f665aea79409bb3ba2ae2" + digest = "1:7b12ea8b50040c6c2378ec5b5a1ab722730b2bfb46e8724ded57f2c3905431fa" name = "github.com/eapache/go-xerial-snappy" packages = ["."] pruneopts = "" @@ -312,7 +304,7 @@ version = "v1.1.0" [[projects]] - digest = "1:d2e2aebcb8e8027345e16f9d0be8cdee3bb470ba406c7a54cb7457ae3ad4ace5" + digest = "1:3fa846cb3feb4e65371fe3c347c299de9b5bc3e71e256c0d940cd19b767a6ba0" name = "github.com/eclipse/paho.mqtt.golang" packages = [ ".", @@ -323,7 +315,7 @@ version = "v1.1.1" [[projects]] - digest = "1:d19c78214e03e297e9e30d2eb11892f731358b2951f2a5c7374658a156373e4c" + digest = "1:858b7fe7b0f4bc7ef9953926828f2816ea52d01a88d72d1c45bc8c108f23c356" name = "github.com/go-ini/ini" packages = ["."] pruneopts = "" @@ -339,7 +331,7 @@ version = "v0.3.0" [[projects]] - digest = "1:c3a5ae14424a38c244439732c31a08b5f956c46c4acdc159fc285a52dbf11de0" + digest = "1:96c4a6ff4206086347bfe28e96e092642882128f45ecb8dc8f15f3e6f6703af0" name = "github.com/go-ole/go-ole" packages = [ ".", @@ -350,7 +342,7 @@ version = "v1.2.1" [[projects]] - digest = "1:f2f6a616a1ca8aed667d956c98f7f6178efe72bbe0a419bd33b9d99841c7de69" + digest = "1:3dfd659219b6f63dc0677a62b8d4e8f10b5cf53900aef40858db10a19407e41d" name = "github.com/go-redis/redis" packages = [ ".", @@ -367,7 +359,7 @@ version = "v6.12.0" [[projects]] - digest = "1:dc876ae7727280d95f97af5320308131278b93d6c6f5cf953065e18cb8c88fd2" + digest = "1:c07de423ca37dc2765396d6971599ab652a339538084b9b58c9f7fc533b28525" name = "github.com/go-sql-driver/mysql" packages = ["."] pruneopts = "" @@ -375,7 +367,7 @@ version = "v1.4.0" [[projects]] - digest = "1:b7a7e17513aeee6492d93015c7bf29c86a0c1c91210ea56b21e36c1a40958cba" + digest = "1:9ab1b1c637d7c8f49e39d8538a650d7eb2137b076790cff69d160823b505964c" name = "github.com/gobwas/glob" packages = [ ".", @@ -392,7 +384,7 @@ version = "v0.2.3" [[projects]] - digest = "1:673df1d02ca0c6f51458fe94bbb6fae0b05e54084a31db2288f1c4321255c2da" + digest = "1:6e73003ecd35f4487a5e88270d3ca0a81bc80dc88053ac7e4dcfec5fba30d918" name = "github.com/gogo/protobuf" packages = ["proto"] pruneopts = "" @@ -400,7 +392,7 @@ version = "v1.1.1" [[projects]] - digest = "1:b1d3041d568e065ab4d76f7477844458e9209c0bb241eaccdc0770bf0a13b120" + digest = "1:f958a1c137db276e52f0b50efee41a1a389dcdded59a69711f3e872757dab34b" name = "github.com/golang/protobuf" packages = [ "proto", @@ -415,14 +407,14 @@ [[projects]] branch = "master" - digest = "1:075128b9fc42e6d99067da1a2e6c0a634a6043b5a60abe6909c51f5ecad37b6d" + digest = "1:2a5888946cdbc8aa360fd43301f9fc7869d663f60d5eedae7d4e6e5e4f06f2bf" name = "github.com/golang/snappy" packages = ["."] pruneopts = "" revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a" [[projects]] - digest = "1:cc082d7b9cc3f832f2aed9d06d1cbb33b6984a61d8ec403535b086415c181607" + digest = "1:f9f45f75f332e03fc7e9fe9188ea4e1ce4d14779ef34fa1b023da67518e36327" name = "github.com/google/go-cmp" packages = [ "cmp", @@ -459,7 +451,7 @@ revision = "e80d13ce29ede4452c43dea11e79b9bc8a15b478" [[projects]] - digest = "1:db58383b43f583c44fb47c3331de943a11bb73ea951c2def55d29a454a57f4ee" + digest = "1:e7224669901bab4094e6d6697c136557b7177db6ceb01b7fc8b20d08f4b5aacd" name = "github.com/hashicorp/consul" packages = ["api"] pruneopts = "" @@ -476,14 +468,14 @@ [[projects]] branch = "master" - digest = "1:cd5813053beac0114f96a7da3924fc8a15e0cd2b139f079e0fcce5d3244ae304" + digest = 
"1:ff65bf6fc4d1116f94ac305342725c21b55c16819c2606adc8f527755716937f" name = "github.com/hashicorp/go-rootcerts" packages = ["."] pruneopts = "" revision = "6bb64b370b90e7ef1fa532be9e591a81c3493e00" [[projects]] - digest = "1:d2b2cff454cb23a9769ef3c9075741f5985773a998584b3b3ce203fe4b1abbea" + digest = "1:f72168ea995f398bab88e84bd1ff58a983466ba162fb8d50d47420666cd57fad" name = "github.com/hashicorp/serf" packages = ["coordinate"] pruneopts = "" @@ -491,7 +483,7 @@ version = "v0.8.1" [[projects]] - digest = "1:cc0cf2e12280074e5c6dc0f15a4bb3d6c43509e6091cdcdcc83eea491577257b" + digest = "1:a39ef049cdeee03a57b132e7d60e32711b9d949c78458da78e702d9864c54369" name = "github.com/influxdata/go-syslog" packages = [ "rfc5424", @@ -503,7 +495,7 @@ [[projects]] branch = "master" - digest = "1:effc58ad45323ad15159bbca533be4870eaddb2d9a513d3488d8bfe822c83532" + digest = "1:bc3eb5ddfd59781ea1183f2b3d1eb105a1495d421f09b2ccd360c7fced0b612d" name = "github.com/influxdata/tail" packages = [ ".", @@ -517,7 +509,7 @@ [[projects]] branch = "master" - digest = "1:d31edcf33a3b36218de96e43f3fec18ea96deb2a28b838a3a01a4df856ded345" + digest = "1:7fb6cc9607eaa6ef309edebc42b57f704244bd4b9ab23bff128829c4ad09b95d" name = "github.com/influxdata/toml" packages = [ ".", @@ -535,7 +527,7 @@ revision = "7c63b0a71ef8300adc255344d275e10e5c3a71ec" [[projects]] - digest = "1:4197871f269749786aa2406557dba15f10cf79161cdc3998180614c62c8b6351" + digest = "1:2de1791b9e43f26c696e36950e42676565e7da7499a870bc02213da4b59b1d14" name = "github.com/jackc/pgx" packages = [ ".", @@ -551,7 +543,7 @@ version = "v3.1.0" [[projects]] - digest = "1:4f767a115bc8e08576f6d38ab73c376fc1b1cd3bb5041171c9e8668cc7739b52" + digest = "1:6f49eae0c1e5dab1dafafee34b207aeb7a42303105960944828c2079b92fc88e" name = "github.com/jmespath/go-jmespath" packages = ["."] pruneopts = "" @@ -567,7 +559,7 @@ [[projects]] branch = "master" - digest = "1:2df59f23f11c5c59982f737c98c5523b276bfc85a4773a04b411190402bb30fd" + digest = "1:fed90fa725d3b1bac0a760de64426834dfef4546474cf182f2ec94285afa74a8" name = "github.com/kardianos/service" packages = ["."] pruneopts = "" @@ -591,7 +583,7 @@ [[projects]] branch = "master" - digest = "1:28ca57775f285ae87cbdc7280aad91c5f2ed3c2af98d9f035d75956d1ca97fe6" + digest = "1:7e9956922e349af0190afa0b6621befcd201072679d8e51a9047ff149f2afe93" name = "github.com/mailru/easyjson" packages = [ ".", @@ -603,7 +595,7 @@ revision = "efc7eb8984d6655c26b5c9d2e65c024e5767c37c" [[projects]] - digest = "1:49a8b01a6cd6558d504b65608214ca40a78000e1b343ed0da5c6a9ccd83d6d30" + digest = "1:63722a4b1e1717be7b98fc686e0b30d5e7f734b9e93d7dee86293b6deab7ea28" name = "github.com/matttproud/golang_protobuf_extensions" packages = ["pbutil"] pruneopts = "" @@ -611,7 +603,7 @@ version = "v1.0.1" [[projects]] - digest = "1:f0bad0fece0fb73c6ea249c18d8e80ffbe86be0457715b04463068f04686cf39" + digest = "1:4c8d8358c45ba11ab7bb15df749d4df8664ff1582daead28bae58cf8cbe49890" name = "github.com/miekg/dns" packages = ["."] pruneopts = "" @@ -651,7 +643,7 @@ version = "v0.1.0" [[projects]] - digest = "1:e5894541d6ceec5dd283e24e3530aadf59c06449695d19189a7a27bb4c15840d" + digest = "1:e5ec850ce66beb0014fc40d8e64b7482172eee71d86d734d66def5e9eac16797" name = "github.com/nats-io/gnatsd" packages = [ "conf", @@ -665,7 +657,7 @@ version = "v1.2.0" [[projects]] - digest = "1:88f1bde4c172e27b05ed46adfbd0e79dc1663a6281e4b39fa3e39d71ead9621d" + digest = "1:665af347df4c5d1ae4c3eacd0754f5337a301f6a3f2444c9993b996605c8c02b" name = "github.com/nats-io/go-nats" packages = [ ".", @@ -685,7 +677,7 @@ 
version = "v1.0.0" [[projects]] - digest = "1:501cce26a54c785458b0dd54a08ddd984d4ad0c198255430d5d37cd2efe23149" + digest = "1:7a69f6a3a33929f8b66aa39c93868ad1698f06417fe627ae067559beb94504bd" name = "github.com/nsqio/go-nsq" packages = ["."] pruneopts = "" @@ -701,7 +693,7 @@ version = "v1.0.0-rc1" [[projects]] - digest = "1:0d08f7224705b1df80beee92ffbdc63ab13fd6f6eb80bf287735f9bc7e8b83eb" + digest = "1:f26c8670b11e29a49c8e45f7ec7f2d5bac62e8fd4e3c0ae1662baa4a697f984a" name = "github.com/opencontainers/image-spec" packages = [ "specs-go", @@ -720,7 +712,7 @@ revision = "a52f2342449246d5bcc273e65cbdcfa5f7d6c63c" [[projects]] - digest = "1:bba12aa4747b212f75db3e7fee73fe1b66d303cb3ff0c1984b7f2ad20e8bd2bc" + digest = "1:78fb99d6011c2ae6c72f3293a83951311147b12b06a5ffa43abf750c4fab6ac5" name = "github.com/opentracing/opentracing-go" packages = [ ".", @@ -732,7 +724,7 @@ version = "v1.0.2" [[projects]] - digest = "1:c6c0db6294924072f98a0de090d200bae4b7102b12a443ba9569c4ba7df52aa1" + digest = "1:fea0e67285d900e5a0a7ec19ff4b4c82865a28dddbee8454c5360ad908f7069c" name = "github.com/openzipkin/zipkin-go-opentracing" packages = [ ".", @@ -747,7 +739,7 @@ version = "v0.3.4" [[projects]] - digest = "1:41de12a4684237dd55a11260c941c2c58a055951985e9473ba1661175a13fea7" + digest = "1:29e34e58f26655c4d73135cdfc0517ea2ff1483eff34e5d5ef4b6fddbb81e31b" name = "github.com/pierrec/lz4" packages = [ ".", @@ -774,7 +766,7 @@ version = "v1.0.0" [[projects]] - digest = "1:981835985f655d1d380cc6aa7d9fa9ad7abfaf40c75da200fd40d864cd05a7c3" + digest = "1:4142d94383572e74b42352273652c62afec5b23f325222ed09198f46009022d1" name = "github.com/prometheus/client_golang" packages = [ "prometheus", @@ -786,7 +778,7 @@ [[projects]] branch = "master" - digest = "1:562d53e436b244a9bb5c1ff43bcaf4882e007575d34ec37717b15751c65cc63a" + digest = "1:185cf55b1f44a1bf243558901c3f06efa5c64ba62cfdcbb1bf7bbe8c3fb68561" name = "github.com/prometheus/client_model" packages = ["go"] pruneopts = "" @@ -794,7 +786,7 @@ [[projects]] branch = "master" - digest = "1:6a8420870eb2935977da1fff0f3afca9bdb3f1e66258c9e91a8a7ce0b5417c3b" + digest = "1:bfbc121ef802d245ef67421cff206615357d9202337a3d492b8f668906b485a8" name = "github.com/prometheus/common" packages = [ "expfmt", @@ -807,7 +799,7 @@ [[projects]] branch = "master" - digest = "1:00fca823dfcdd8107226f67215afd948b001525223ed955a05b33a4c885c9591" + digest = "1:b694a6bdecdace488f507cff872b30f6f490fdaf988abd74d87ea56406b23b6e" name = "github.com/prometheus/procfs" packages = [ ".", @@ -820,7 +812,7 @@ [[projects]] branch = "master" - digest = "1:1b65925989a4dfb6d98ef1d530cda33ab1ff25945b14a22a8b8bb27cc282af70" + digest = "1:15bcdc717654ef21128e8af3a63eec39a6d08a830e297f93d65163f87c8eb523" name = "github.com/rcrowley/go-metrics" packages = ["."] pruneopts = "" @@ -828,7 +820,7 @@ [[projects]] branch = "master" - digest = "1:d8fe9f454582e04b5693b59cdebe3f0bd9dc29ad9651bfb1633cba4658b66c65" + digest = "1:7fc2f428767a2521abc63f1a663d981f61610524275d6c0ea645defadd4e916f" name = "github.com/samuel/go-zookeeper" packages = ["zk"] pruneopts = "" @@ -843,7 +835,7 @@ version = "v1.2.0" [[projects]] - digest = "1:987ce58e999676c2e209831390f2d56621ff98def2ecca4928e73fe1e2569954" + digest = "1:fce9909f20bc6a6363a6d589e478bdcf8111044b41566d37d7552bf92d955540" name = "github.com/shirou/gopsutil" packages = [ "cpu", @@ -856,8 +848,8 @@ "process", ] pruneopts = "" - revision = "4a180b209f5f494e5923cfce81ea30ba23915877" - version = "v2.18.06" + revision = "8048a2e9c5773235122027dd585cf821b2af1249" + version = "v2.18.07" 
[[projects]] branch = "master" @@ -868,7 +860,7 @@ revision = "bb4de0191aa41b5507caa14b0650cdbddcd9280b" [[projects]] - digest = "1:f2cc92b78b2f3b76ab0f9daddddd28627bcfcc6cacf119029aa3850082d95079" + digest = "1:8cf46b6c18a91068d446e26b67512cf16f1540b45d90b28b9533706a127f0ca6" name = "github.com/sirupsen/logrus" packages = ["."] pruneopts = "" @@ -877,7 +869,7 @@ [[projects]] branch = "master" - digest = "1:79e73b87cb07e380d1a3aaa14fbcc418e0d42eede5f971e7ee2f4a6e6d531deb" + digest = "1:4b0cabe65ca903a7b2a3e6272c5304eb788ce196d35ecb901c6563e5e7582443" name = "github.com/soniah/gosnmp" packages = ["."] pruneopts = "" @@ -885,14 +877,14 @@ [[projects]] branch = "master" - digest = "1:0a1f8d01a0191f558910bcbfd7e1dc11a53ac374473d13b68b8fe520f21efb07" + digest = "1:4e8f1cae8e6d83af9000d82566efb8823907dae77ba4f1d76ff28fdd197c3c90" name = "github.com/streadway/amqp" packages = ["."] pruneopts = "" revision = "e5adc2ada8b8efff032bf61173a233d143e9318e" [[projects]] - digest = "1:34062a2274daa6ec4d2f50d6070cc51cf4674d6d553ed76b406cb3425b9528e8" + digest = "1:711eebe744c0151a9d09af2315f0bb729b2ec7637ef4c410fa90a18ef74b65b6" name = "github.com/stretchr/objx" packages = ["."] pruneopts = "" @@ -900,7 +892,7 @@ version = "v0.1.1" [[projects]] - digest = "1:bc2a12c8863e1080226b7bc69192efd6c37aaa9b85cec508b0a8f54fabb9bd9f" + digest = "1:c587772fb8ad29ad4db67575dad25ba17a51f072ff18a22b4f0257a4d9c24f75" name = "github.com/stretchr/testify" packages = [ "assert", @@ -928,7 +920,7 @@ revision = "1731857f09b1f38450e2c12409748407822dc6be" [[projects]] - digest = "1:23e2b9f3a20cd4a6427147377255ec2f6237e8606fa6ef0707ed79b7bfbe3a83" + digest = "1:343f20460c11a0d0529fe532553bfef9446918d1a1fda6d8661eb27d5b1a68b8" name = "github.com/vjeantet/grok" packages = ["."] pruneopts = "" @@ -937,7 +929,7 @@ [[projects]] branch = "master" - digest = "1:5383edd40c7f6c95a7dc46a47bf0c83de4bf40a4252f12fa803f790037addffc" + digest = "1:98ed05e9796df287b90c1d96854e3913c8e349dbc546412d3cabb472ecf4b417" name = "github.com/wvanbergen/kafka" packages = ["consumergroup"] pruneopts = "" @@ -945,7 +937,7 @@ [[projects]] branch = "master" - digest = "1:f936b4936e1b092cc41c9b33fdc990ad78386545f1ffeca8427c72b2605bca85" + digest = "1:12aff3cc417907bf9f683a6bf1dc78ffb08e41bc69f829491e593ea9b951a3cf" name = "github.com/wvanbergen/kazoo-go" packages = ["."] pruneopts = "" @@ -953,7 +945,7 @@ [[projects]] branch = "master" - digest = "1:9946d558a909f63e31332c77b82649522da97ae7f7cfbfebc6f53549ab6b3e0f" + digest = "1:c5918689b7e187382cc1066bf0260de54ba9d1b323105f46ed2551d2fb4a17c7" name = "github.com/yuin/gopher-lua" packages = [ ".", @@ -966,7 +958,7 @@ [[projects]] branch = "master" - digest = "1:84e9087a94f336c204887281046891769d2ed7bf1d2b31c21ff6fb5e1743abce" + digest = "1:2fcfc6c3fb8dfe0d80d7789272230d3ac7db15022b66817113f98d9fff880225" name = "github.com/zensqlmonitor/go-mssqldb" packages = ["."] pruneopts = "" @@ -974,7 +966,7 @@ [[projects]] branch = "master" - digest = "1:21100b2e8b6922303dd109da81b3134ed0eff05cb3402881eabde9cce8f4e5e6" + digest = "1:0773b5c3be42874166670a20aa177872edb450cd9fc70b1df97303d977702a50" name = "golang.org/x/crypto" packages = [ "bcrypt", @@ -990,7 +982,7 @@ [[projects]] branch = "master" - digest = "1:58d8f8f3ad415b10d2145316519e5b7995b7cf9e663b33a1e9e0c2ddd96c1d58" + digest = "1:00ff990baae4665bb0a8174af5ff78228574227ed96c89671247a56852a50e21" name = "golang.org/x/net" packages = [ "bpf", @@ -1018,7 +1010,7 @@ [[projects]] branch = "master" - digest = 
"1:a8944db88149e7ecbea4b760c625b9ccf455fceae21387bc8890c3589d28b623" + digest = "1:677e38cad6833ad266ec843739d167755eda1e6f2d8af1c63102b0426ad820db" name = "golang.org/x/sys" packages = [ "unix", @@ -1033,7 +1025,7 @@ revision = "ac767d655b305d4e9612f5f6e33120b9176c4ad4" [[projects]] - digest = "1:af9bfca4298ef7502c52b1459df274eed401a4f5498b900e9a92d28d3d87ac5a" + digest = "1:5acd3512b047305d49e8763eef7ba423901e85d5dd2fd1e71778a0ea8de10bd4" name = "golang.org/x/text" packages = [ "collate", @@ -1068,7 +1060,7 @@ version = "v0.3.0" [[projects]] - digest = "1:eede11c81b63c8f6fd06ef24ba0a640dc077196ec9b7a58ecde03c82eee2f151" + digest = "1:c1771ca6060335f9768dff6558108bc5ef6c58506821ad43377ee23ff059e472" name = "google.golang.org/appengine" packages = ["cloudsql"] pruneopts = "" @@ -1077,14 +1069,14 @@ [[projects]] branch = "master" - digest = "1:8d093c040b734e160cbe8291c7b539c36d2c6dd4581c4bb37cff56078c65bd07" + digest = "1:b1443b4e3cc990c84d27fcdece9d3302158c67dba870e33a6937a2c0076388c2" name = "google.golang.org/genproto" packages = ["googleapis/rpc/status"] pruneopts = "" revision = "fedd2861243fd1a8152376292b921b394c7bef7e" [[projects]] - digest = "1:05f2028524c4eada11e3f46d23139f23e9e0a40b2552207a5af278e8063ce782" + digest = "1:5f31b45ee9da7a87f140bef3ed0a7ca34ea2a6d38eb888123b8e28170e8aa4f2" name = "google.golang.org/grpc" packages = [ ".", @@ -1118,7 +1110,7 @@ version = "v1.13.0" [[projects]] - digest = "1:2840683aa0e9980689f85bf48b2a56ec7a108fd089f12af8ea7d98c172819589" + digest = "1:15d017551627c8bb091bde628215b2861bed128855343fdd570c62d08871f6e1" name = "gopkg.in/alecthomas/kingpin.v2" packages = ["."] pruneopts = "" @@ -1126,7 +1118,7 @@ version = "v2.2.6" [[projects]] - digest = "1:a8f8c1725195c4324d4350fae001524ca7489e40d9b6bb47598772e3faa103ba" + digest = "1:3cad99e0d1f94b8c162787c12e59d0a0b9df1ef75590eb145cdd625479091efe" name = "gopkg.in/asn1-ber.v1" packages = ["."] pruneopts = "" @@ -1142,7 +1134,7 @@ version = "v2.0.0" [[projects]] - digest = "1:b2106f1668ea5efc1ecc480f7e922a093adb9563fd9ce58585292871f0d0f229" + digest = "1:eb53021a8aa3f599d29c7102e65026242bdedce998a54837dc67f14b6a97c5fd" name = "gopkg.in/fsnotify.v1" packages = ["."] pruneopts = "" @@ -1151,7 +1143,7 @@ version = "v1.4.7" [[projects]] - digest = "1:5fa5df18f3bd9cad28ed7f263b15da217945735110898fa2b9af25cdafb9cbf3" + digest = "1:960720207d3d0992995f4576e1366fd9e9b1483473b07fb7243144f75f5b1546" name = "gopkg.in/gorethink/gorethink.v3" packages = [ ".", @@ -1164,7 +1156,7 @@ version = "v3.0.5" [[projects]] - digest = "1:74163d1887c0821951e6f1795a1d10338f45f09d9067cb4a8edcf7ee481724ee" + digest = "1:367baf06b7dbd0ef0bbdd785f6a79f929c96b0c18e9d3b29c0eed1ac3f5db133" name = "gopkg.in/ldap.v2" packages = ["."] pruneopts = "" @@ -1173,7 +1165,7 @@ [[projects]] branch = "v2" - digest = "1:f799e95918890212dcf4ce5951291061d318f689977ec9cea0417b08433c2a9d" + digest = "1:f54ba71a035aac92ced3e902d2bff3734a15d1891daff73ec0f90ef236750139" name = "gopkg.in/mgo.v2" packages = [ ".", @@ -1186,7 +1178,7 @@ revision = "9856a29383ce1c59f308dd1cf0363a79b5bef6b5" [[projects]] - digest = "1:427414c304a47b497759094220ce42dd2e838ab7d52de197c633b800c6ff84b5" + digest = "1:b49c4d3115800eace659c9a6a5c384a922f5b210178b24a01abb10731f404ea2" name = "gopkg.in/olivere/elastic.v5" packages = [ ".", @@ -1216,6 +1208,97 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "726abf0a241126b415293c203dddc516e4d8be9b0f2913fb3ab2c4eb332e3ce2" + input-imports = [ + "collectd.org/api", + "collectd.org/network", + 
"github.com/Microsoft/ApplicationInsights-Go/appinsights", + "github.com/Shopify/sarama", + "github.com/StackExchange/wmi", + "github.com/aerospike/aerospike-client-go", + "github.com/amir/raidman", + "github.com/apache/thrift/lib/go/thrift", + "github.com/aws/aws-sdk-go/aws", + "github.com/aws/aws-sdk-go/aws/client", + "github.com/aws/aws-sdk-go/aws/credentials", + "github.com/aws/aws-sdk-go/aws/credentials/stscreds", + "github.com/aws/aws-sdk-go/aws/session", + "github.com/aws/aws-sdk-go/service/cloudwatch", + "github.com/aws/aws-sdk-go/service/kinesis", + "github.com/aws/aws-sdk-go/service/sts", + "github.com/bsm/sarama-cluster", + "github.com/couchbase/go-couchbase", + "github.com/dgrijalva/jwt-go", + "github.com/docker/docker/api/types", + "github.com/docker/docker/api/types/container", + "github.com/docker/docker/api/types/filters", + "github.com/docker/docker/api/types/registry", + "github.com/docker/docker/api/types/swarm", + "github.com/docker/docker/client", + "github.com/eclipse/paho.mqtt.golang", + "github.com/go-redis/redis", + "github.com/go-sql-driver/mysql", + "github.com/gobwas/glob", + "github.com/golang/protobuf/proto", + "github.com/google/go-cmp/cmp", + "github.com/gorilla/mux", + "github.com/hashicorp/consul/api", + "github.com/influxdata/go-syslog/rfc5424", + "github.com/influxdata/go-syslog/rfc5425", + "github.com/influxdata/tail", + "github.com/influxdata/toml", + "github.com/influxdata/toml/ast", + "github.com/influxdata/wlog", + "github.com/jackc/pgx", + "github.com/jackc/pgx/pgtype", + "github.com/jackc/pgx/stdlib", + "github.com/kardianos/service", + "github.com/kballard/go-shellquote", + "github.com/matttproud/golang_protobuf_extensions/pbutil", + "github.com/miekg/dns", + "github.com/multiplay/go-ts3", + "github.com/nats-io/gnatsd/server", + "github.com/nats-io/go-nats", + "github.com/nsqio/go-nsq", + "github.com/openzipkin/zipkin-go-opentracing", + "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore", + "github.com/prometheus/client_golang/prometheus", + "github.com/prometheus/client_golang/prometheus/promhttp", + "github.com/prometheus/client_model/go", + "github.com/prometheus/common/expfmt", + "github.com/prometheus/common/log", + "github.com/satori/go.uuid", + "github.com/shirou/gopsutil/cpu", + "github.com/shirou/gopsutil/disk", + "github.com/shirou/gopsutil/host", + "github.com/shirou/gopsutil/load", + "github.com/shirou/gopsutil/mem", + "github.com/shirou/gopsutil/net", + "github.com/shirou/gopsutil/process", + "github.com/soniah/gosnmp", + "github.com/streadway/amqp", + "github.com/stretchr/testify/assert", + "github.com/stretchr/testify/mock", + "github.com/stretchr/testify/require", + "github.com/tidwall/gjson", + "github.com/vjeantet/grok", + "github.com/wvanbergen/kafka/consumergroup", + "github.com/zensqlmonitor/go-mssqldb", + "golang.org/x/net/context", + "golang.org/x/net/html/charset", + "golang.org/x/sys/unix", + "golang.org/x/sys/windows", + "golang.org/x/sys/windows/svc", + "golang.org/x/sys/windows/svc/mgr", + "google.golang.org/grpc", + "google.golang.org/grpc/codes", + "google.golang.org/grpc/credentials", + "google.golang.org/grpc/status", + "gopkg.in/gorethink/gorethink.v3", + "gopkg.in/ldap.v2", + "gopkg.in/mgo.v2", + "gopkg.in/mgo.v2/bson", + "gopkg.in/olivere/elastic.v5", + "gopkg.in/yaml.v2", + ] solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index 799b5243c3087..d282e1ebd7e6b 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -148,7 +148,7 @@ [[constraint]] name = 
"github.com/shirou/gopsutil" - version = "2.18.05" + version = "2.18.07" [[constraint]] name = "github.com/Shopify/sarama" diff --git a/README.md b/README.md index 03e3a8f589eeb..e0d88e4148b94 100644 --- a/README.md +++ b/README.md @@ -43,7 +43,7 @@ Ansible role: https://github.com/rossmcdonald/telegraf Telegraf requires golang version 1.9 or newer, the Makefile requires GNU make. 1. [Install Go](https://golang.org/doc/install) >=1.9 -2. [Install dep](https://golang.github.io/dep/docs/installation.html) ==v0.4.1 +2. [Install dep](https://golang.github.io/dep/docs/installation.html) ==v0.5.0 3. Download Telegraf source: ``` go get -d github.com/influxdata/telegraf @@ -127,6 +127,7 @@ configuration options. ## Input Plugins +* [activemq](./plugins/inputs/activemq) * [aerospike](./plugins/inputs/aerospike) * [amqp_consumer](./plugins/inputs/amqp_consumer) (rabbitmq) * [apache](./plugins/inputs/apache) @@ -158,6 +159,7 @@ configuration options. * [fibaro](./plugins/inputs/fibaro) * [file](./plugins/inputs/file) * [filestat](./plugins/inputs/filestat) +* [filecount](./plugins/inputs/filecount) * [fluentd](./plugins/inputs/fluentd) * [graylog](./plugins/inputs/graylog) * [haproxy](./plugins/inputs/haproxy) @@ -197,7 +199,7 @@ configuration options. * [nats](./plugins/inputs/nats) * [net](./plugins/inputs/net) * [net_response](./plugins/inputs/net_response) -* [netstat](./plugins/inputs/netstat) +* [netstat](./plugins/inputs/net) * [nginx](./plugins/inputs/nginx) * [nginx_plus](./plugins/inputs/nginx_plus) * [nsq_consumer](./plugins/inputs/nsq_consumer) @@ -208,6 +210,7 @@ configuration options. * [openldap](./plugins/inputs/openldap) * [opensmtpd](./plugins/inputs/opensmtpd) * [pf](./plugins/inputs/pf) +* [pgbouncer](./plugins/inputs/pgbouncer) * [phpfpm](./plugins/inputs/phpfpm) * [phusion passenger](./plugins/inputs/passenger) * [ping](./plugins/inputs/ping) @@ -276,6 +279,7 @@ formats may be used with input plugins supporting the `data_format` option: * [override](./plugins/processors/override) * [printer](./plugins/processors/printer) * [regex](./plugins/processors/regex) +* [rename](./plugins/processors/rename) * [topk](./plugins/processors/topk) ## Aggregator Plugins diff --git a/appveyor.yml b/appveyor.yml index cd8938ff71172..a1af84d6c4fb2 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -21,8 +21,8 @@ install: - 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y - go get -d github.com/golang/dep - cd "%GOPATH%\src\github.com\golang\dep" - - git checkout -q v0.4.1 - - go install -ldflags="-X main.version=v0.4.1" ./cmd/dep + - git checkout -q v0.5.0 + - go install -ldflags="-X main.version=v0.5.0" ./cmd/dep - cd "%GOPATH%\src\github.com\influxdata\telegraf" - git config --system core.longpaths true - go version diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 57ff846cfa873..e8ac7e6603b47 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -58,22 +58,11 @@ var fService = flag.String("service", "", var fRunAsConsole = flag.Bool("console", false, "run as console application (windows only)") var ( - nextVersion = "1.8.0" - version string - commit string - branch string + version string + commit string + branch string ) -func init() { - // If commit or branch are not set, make that clear. - if commit == "" { - commit = "unknown" - } - if branch == "" { - branch = "unknown" - } -} - var stop chan struct{} func reloadLoop( @@ -165,7 +154,7 @@ func reloadLoop( } }() - log.Printf("I! Starting Telegraf %s\n", displayVersion()) + log.Printf("I! 
Starting Telegraf %s\n", version) log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " ")) log.Printf("I! Loaded aggregators: %s", strings.Join(c.AggregatorNames(), " ")) log.Printf("I! Loaded processors: %s", strings.Join(c.ProcessorNames(), " ")) @@ -225,11 +214,27 @@ func (p *program) Stop(s service.Service) error { return nil } -func displayVersion() string { - if version == "" { - return fmt.Sprintf("v%s~%s", nextVersion, commit) +func formatFullVersion() string { + var parts = []string{"Telegraf"} + + if version != "" { + parts = append(parts, version) + } else { + parts = append(parts, "unknown") } - return "v" + version + + if branch != "" || commit != "" { + if branch == "" { + branch = "unknown" + } + if commit == "" { + commit = "unknown" + } + git := fmt.Sprintf("(git: %s %s)", branch, commit) + parts = append(parts, git) + } + + return strings.Join(parts, " ") } func main() { @@ -273,7 +278,7 @@ func main() { if len(args) > 0 { switch args[0] { case "version": - fmt.Printf("Telegraf %s (git: %s %s)\n", displayVersion(), branch, commit) + fmt.Println(formatFullVersion()) return case "config": config.PrintSampleConfig( @@ -301,7 +306,7 @@ func main() { } return case *fVersion: - fmt.Printf("Telegraf %s (git: %s %s)\n", displayVersion(), branch, commit) + fmt.Println(formatFullVersion()) return case *fSampleConfig: config.PrintSampleConfig( diff --git a/docker-compose.yml b/docker-compose.yml index 822d7fff1a76b..5ac47089db975 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -40,6 +40,13 @@ services: image: memcached ports: - "11211:11211" + pgbouncer: + image: mbed/pgbouncer + environment: + PG_ENV_POSTGRESQL_USER: pgbouncer + PG_ENV_POSTGRESQL_PASS: pgbouncer + ports: + - "6432:6432" postgres: image: postgres:alpine ports: diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md index 5a63e9d83690c..7f7c94930e1e6 100644 --- a/docs/DATA_FORMATS_INPUT.md +++ b/docs/DATA_FORMATS_INPUT.md @@ -10,6 +10,8 @@ Telegraf is able to parse the following input data formats into metrics: 1. [Collectd](#collectd) 1. [Dropwizard](#dropwizard) 1. [Grok](#grok) +1. [Logfmt](#logfmt) +1. [Wavefront](#wavefront) Telegraf metrics, like InfluxDB [points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/), @@ -660,7 +662,7 @@ For more information about the dropwizard json format see # tag2 = "tags.tag2" ``` -# Grok +# Grok: The grok data format parses line delimited data using a regular expression like language. @@ -669,52 +671,8 @@ The best way to get acquainted with grok patterns is to read the logstash docs, which are available here: https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html -#### Grok Configuration: -```toml -[[inputs.reader]] - ## Files to parse each interval. - ## These accept standard unix glob matching rules, but with the addition of - ## ** as a "super asterisk". ie: - ## /var/log/**.log -> recursively find all .log files in /var/log - ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log - ## /var/log/apache.log -> only tail the apache log file - files = ["/var/log/apache/access.log"] - - ## The dataformat to be read from files - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "grok" - - ## This is a list of patterns to check the given log file(s) for. - ## Note that adding patterns here increases processing time. 
The most
-  ## efficient configuration is to have one pattern per logparser.
-  ## Other common built-in patterns are:
-  ##   %{COMMON_LOG_FORMAT}   (plain apache & nginx access logs)
-  ##   %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
-  grok_patterns = ["%{COMBINED_LOG_FORMAT}"]
-
-  ## Full path(s) to custom pattern files.
-  grok_custom_pattern_files = []
-
-  ## Custom patterns can also be defined here. Put one pattern per line.
-  grok_custom_patterns = '''
-  '''
-
-  ## Timezone allows you to provide an override for timestamps that
-  ## don't already include an offset
-  ## e.g. 04/06/2016 12:41:45 data one two 5.43µs
-  ##
-  ## Default: "" which renders UTC
-  ## Options are as follows:
-  ##   1. Local             -- interpret based on machine localtime
-  ##   2. "Canada/Eastern"  -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
-  ##   3. UTC               -- or blank/unspecified, will return timestamp in UTC
-  grok_timezone = "Canada/Eastern"
-```
-
-The Telegraf grok parser uses a slightly modified version of logstash "grok"
-patterns, with the format
+The grok parser uses a slightly modified version of logstash "grok"
+patterns, with the format:
 
 ```
 %{<capture_syntax>[:<semantic_name>][:<modifier>]}
@@ -739,6 +697,7 @@ You must capture at least one field per line.
   - duration (ie, 5.23ms gets converted to int nanoseconds)
   - tag (converts the field into a tag)
   - drop (drops the field completely)
+  - measurement (use the matched text as the measurement name)
 - Timestamp modifiers:
   - ts (This will auto-learn the timestamp format)
   - ts-ansic ("Mon Jan _2 15:04:05 2006")
@@ -758,7 +717,7 @@ You must capture at least one field per line.
   - ts-"CUSTOM"
 
 CUSTOM time layouts must be within quotes and be the representation of the
-"reference time", which is `Mon Jan 2 15:04:05 -0700 MST 2006`.
+"reference time", which is `Mon Jan 2 15:04:05 -0700 MST 2006`. For example
+`%{TIMESTAMP:timestamp:ts-"2006-01-02 15:04:05.000"}` can be used to match
+`"2018-01-02 15:04:05,000"`.
 To match a comma decimal point you can use a period in the pattern string.
 See https://golang.org/pkg/time/#Parse for more details.
@@ -772,6 +731,50 @@ logstash patterns that depend on these are not supported._
 
 If you need help building patterns to match your logs, you will find the
 https://grokdebug.herokuapp.com application quite useful!
 
+#### Grok Configuration:
+```toml
+[[inputs.file]]
+  ## Files to parse each interval.
+  ## These accept standard unix glob matching rules, but with the addition of
+  ## ** as a "super asterisk". ie:
+  ##   /var/log/**.log     -> recursively find all .log files in /var/log
+  ##   /var/log/*/*.log    -> find all .log files with a parent dir in /var/log
+  ##   /var/log/apache.log -> only tail the apache log file
+  files = ["/var/log/apache/access.log"]
+
+  ## The dataformat to be read from files
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "grok"
+
+  ## This is a list of patterns to check the given log file(s) for.
+  ## Note that adding patterns here increases processing time. The most
+  ## efficient configuration is to have one pattern.
+  ## Other common built-in patterns are:
+  ##   %{COMMON_LOG_FORMAT}   (plain apache & nginx access logs)
+  ##   %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
+  grok_patterns = ["%{COMBINED_LOG_FORMAT}"]
+
+  ## Full path(s) to custom pattern files.
+ grok_custom_pattern_files = [] + + ## Custom patterns can also be defined here. Put one pattern per line. + grok_custom_patterns = ''' + ''' + + ## Timezone allows you to provide an override for timestamps that + ## don't already include an offset + ## e.g. 04/06/2016 12:41:45 data one two 5.43µs + ## + ## Default: "" which renders UTC + ## Options are as follows: + ## 1. Local -- interpret based on machine localtime + ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones + ## 3. UTC -- or blank/unspecified, will return timestamp in UTC + grok_timezone = "Canada/Eastern" +``` + #### Timestamp Examples This example input and config parses a file using a custom timestamp conversion: @@ -781,9 +784,8 @@ This example input and config parses a file using a custom timestamp conversion: ``` ```toml -[[inputs.logparser]] - [inputs.logparser.grok] - patterns = ['%{TIMESTAMP_ISO8601:timestamp:ts-"2006-01-02 15:04:05"} value=%{NUMBER:value:int}'] +[[inputs.file]] + grok_patterns = ['%{TIMESTAMP_ISO8601:timestamp:ts-"2006-01-02 15:04:05"} value=%{NUMBER:value:int}'] ``` This example input and config parses a file using a timestamp in unix time: @@ -794,9 +796,8 @@ This example input and config parses a file using a timestamp in unix time: ``` ```toml -[[inputs.logparser]] - [inputs.logparser.grok] - patterns = ['%{NUMBER:timestamp:ts-epoch} value=%{NUMBER:value:int}'] +[[inputs.file]] + grok_patterns = ['%{NUMBER:timestamp:ts-epoch} value=%{NUMBER:value:int}'] ``` This example parses a file using a built-in conversion and a custom pattern: @@ -806,20 +807,19 @@ Wed Apr 12 13:10:34 PST 2017 value=42 ``` ```toml -[[inputs.logparser]] - [inputs.logparser.grok] - patterns = ["%{TS_UNIX:timestamp:ts-unix} value=%{NUMBER:value:int}"] - custom_patterns = ''' - TS_UNIX %{DAY} %{MONTH} %{MONTHDAY} %{HOUR}:%{MINUTE}:%{SECOND} %{TZ} %{YEAR} - ''' +[[inputs.file]] + grok_patterns = ["%{TS_UNIX:timestamp:ts-unix} value=%{NUMBER:value:int}"] + grok_custom_patterns = ''' + TS_UNIX %{DAY} %{MONTH} %{MONTHDAY} %{HOUR}:%{MINUTE}:%{SECOND} %{TZ} %{YEAR} + ''' ``` For cases where the timestamp itself is without offset, the `timezone` config var is available to denote an offset. By default (with `timezone` either omit, blank or set to `"UTC"`), the times are processed as if in the UTC timezone. If specified as `timezone = "Local"`, the timestamp will be processed based on the current machine timezone configuration. Lastly, if using a -timezone from the list of Unix [timezones](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), the logparser grok will attempt to offset -the timestamp accordingly. See test cases for more detailed examples. +timezone from the list of Unix [timezones](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), +grok will offset the timestamp accordingly. #### TOML Escaping @@ -840,29 +840,26 @@ get a literal `|`. With a basic TOML string, special characters such as backslash must be escaped, requiring us to escape the backslash a second time. ```toml -[[inputs.logparser]] - [inputs.logparser.grok] - patterns = ["\\|%{NUMBER:value:int}\\|%{UNICODE_ESCAPE:escape}\\|'%{WORD:name}'\\|"] - custom_patterns = "UNICODE_ESCAPE (?:\\\\u[0-9A-F]{4})+" +[[inputs.file]] + grok_patterns = ["\\|%{NUMBER:value:int}\\|%{UNICODE_ESCAPE:escape}\\|'%{WORD:name}'\\|"] + grok_custom_patterns = "UNICODE_ESCAPE (?:\\\\u[0-9A-F]{4})+" ``` We cannot use a literal TOML string for the pattern, because we cannot match a `'` within it. 
However, it works well for the custom pattern.
 
 ```toml
-[[inputs.logparser]]
-  [inputs.logparser.grok]
-    patterns = ["\\|%{NUMBER:value:int}\\|%{UNICODE_ESCAPE:escape}\\|'%{WORD:name}'\\|"]
-    custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+'
+[[inputs.file]]
+  grok_patterns = ["\\|%{NUMBER:value:int}\\|%{UNICODE_ESCAPE:escape}\\|'%{WORD:name}'\\|"]
+  grok_custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+'
 ```
 
 A multi-line literal string allows us to encode the pattern:
 
 ```toml
-[[inputs.logparser]]
-  [inputs.logparser.grok]
-    patterns = ['''
-    \|%{NUMBER:value:int}\|%{UNICODE_ESCAPE:escape}\|'%{WORD:name}'\|
-    ''']
-    custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+'
+[[inputs.file]]
+  grok_patterns = ['''
+  \|%{NUMBER:value:int}\|%{UNICODE_ESCAPE:escape}\|'%{WORD:name}'\|
+  ''']
+  grok_custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+'
 ```
 
 #### Tips for creating patterns
@@ -886,4 +883,43 @@ the file output will only print once per `flush_interval`.
 
 - If successful, add the next token, update the pattern and retest.
 - Continue one token at a time until the entire line is successfully parsed.
 
+# Logfmt
+
+This parser implements the logfmt format by extracting and converting
+key-value pairs from log text in the form `<key>=<value>`.
+At the moment, the plugin will produce one metric per line and all keys
+are added as fields.
+A typical log
+```
+method=GET host=influxdata.org ts=2018-07-24T19:43:40.275Z
+connect=4ms service=8ms status=200 bytes=1653
+```
+will be converted into
+```
+logfmt method="GET",host="influxdata.org",ts="2018-07-24T19:43:40.275Z",connect="4ms",service="8ms",status=200i,bytes=1653i
+```
+Additional information about the logfmt format can be found [here](https://brandur.org/logfmt).
+
+# Wavefront:
+
+Metrics in the Wavefront Data Format are parsed directly into Telegraf metrics.
+For more information about the Wavefront Data Format see
+[here](https://docs.wavefront.com/wavefront_data_format.html).
+
+There are no additional configuration options for the Wavefront Data Format line protocol.
+
+#### Wavefront Configuration:
+```toml
+[[inputs.exec]]
+  ## Commands array
+  commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]
+
+  ## measurement name suffix (for separating different commands)
+  name_suffix = "_mycollector"
+
+  ## Data format to consume.
+ ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "wavefront" +``` diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 38942adee0a6a..9315aa457dba7 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -747,6 +747,9 @@ # #basic_username = "Foo" # #basic_password = "Bar" # +# ## IP Ranges which are allowed to access metrics +# #ip_range = ["192.168.0.0/24", "192.168.1.0/30"] +# # ## Interval to expire metrics and not deliver to prometheus, 0 == no expiration # # expiration_interval = "60s" # @@ -1976,6 +1979,9 @@ # ## Timeout for the ipmitool command to complete # timeout = "20s" +# ## Schema Version: (Optional, defaults to version 1) +# metric_version = 2 + # # Gather packets and bytes counters from Linux ipsets # [[inputs.ipset]] diff --git a/etc/telegraf_windows.conf b/etc/telegraf_windows.conf index 54b7ee0e19d50..220944d42b3bc 100644 --- a/etc/telegraf_windows.conf +++ b/etc/telegraf_windows.conf @@ -205,20 +205,6 @@ Instances = ["_Total"] Measurement = "win_swap" - [[inputs.win_perf_counters.object]] - ObjectName = "Network Interface" - Instances = ["*"] - Counters = [ - "Bytes Sent/sec", - "Bytes Received/sec", - "Packets Sent/sec", - "Packets Received/sec", - "Packets Received Discarded", - "Packets Received Errors", - "Packets Outbound Discarded", - "Packets Outbound Errors", - ] - # Windows system plugins using WMI (disabled by default, using diff --git a/internal/buffer/buffer.go b/internal/buffer/buffer.go index cdc81fed304d6..6a460eccb41df 100644 --- a/internal/buffer/buffer.go +++ b/internal/buffer/buffer.go @@ -14,9 +14,12 @@ var ( // Buffer is an object for storing metrics in a circular buffer. type Buffer struct { - buf chan telegraf.Metric - - mu sync.Mutex + sync.Mutex + buf []telegraf.Metric + first int + last int + size int + empty bool } // NewBuffer returns a Buffer @@ -24,33 +27,58 @@ type Buffer struct { // called when the buffer is full, then the oldest metric(s) will be dropped. func NewBuffer(size int) *Buffer { return &Buffer{ - buf: make(chan telegraf.Metric, size), + buf: make([]telegraf.Metric, size), + first: 0, + last: 0, + size: size, + empty: true, } } // IsEmpty returns true if Buffer is empty. func (b *Buffer) IsEmpty() bool { - return len(b.buf) == 0 + return b.empty } // Len returns the current length of the buffer. func (b *Buffer) Len() int { - return len(b.buf) + if b.empty { + return 0 + } else if b.first <= b.last { + return b.last - b.first + 1 + } + // Spans the end of array. + // size - gap in the middle + return b.size - (b.first - b.last - 1) // size - gap +} + +func (b *Buffer) push(m telegraf.Metric) { + // Empty + if b.empty { + b.last = b.first // Reset + b.buf[b.last] = m + b.empty = false + return + } + + b.last++ + b.last %= b.size + + // Full + if b.first == b.last { + MetricsDropped.Incr(1) + b.first = (b.first + 1) % b.size + } + b.buf[b.last] = m } // Add adds metrics to the buffer. func (b *Buffer) Add(metrics ...telegraf.Metric) { - for i, _ := range metrics { + b.Lock() + defer b.Unlock() + for i := range metrics { MetricsWritten.Incr(1) - select { - case b.buf <- metrics[i]: - default: - b.mu.Lock() - MetricsDropped.Incr(1) - <-b.buf - b.buf <- metrics[i] - b.mu.Unlock() - } + b.push(metrics[i]) } } @@ -58,13 +86,39 @@ func (b *Buffer) Add(metrics ...telegraf.Metric) { // the batch will be of maximum length batchSize. 
It can be less than batchSize,
 // if the length of Buffer is less than batchSize.
 func (b *Buffer) Batch(batchSize int) []telegraf.Metric {
-	b.mu.Lock()
-	n := min(len(b.buf), batchSize)
-	out := make([]telegraf.Metric, n)
-	for i := 0; i < n; i++ {
-		out[i] = <-b.buf
+	b.Lock()
+	defer b.Unlock()
+	outLen := min(b.Len(), batchSize)
+	out := make([]telegraf.Metric, outLen)
+	if outLen == 0 {
+		return out
+	}
+
+	// We copy everything right of first up to last, count or end
+	// b.last >= rightInd || b.last < b.first
+	// therefore won't copy past b.last
+	rightInd := min(b.size, b.first+outLen) - 1
+
+	copyCount := copy(out, b.buf[b.first:rightInd+1])
+
+	// We've emptied the ring
+	if rightInd == b.last {
+		b.empty = true
+	}
+	b.first = rightInd + 1
+	b.first %= b.size
+
+	// We circle back for the rest
+	if copyCount < outLen {
+		// b.first is now 0 after wrapping, so the inclusive end index of
+		// the remaining metrics is one less than the remaining count;
+		// without the -1 the metric at the boundary would be skipped.
+		right := min(b.last, outLen-copyCount-1)
+		copy(out[copyCount:], b.buf[b.first:right+1])
+		// We've emptied the ring
+		if right == b.last {
+			b.empty = true
+		}
+		b.first = right + 1
+		b.first %= b.size
 	}
-	b.mu.Unlock()
 	return out
 }
diff --git a/internal/buffer/buffer_test.go b/internal/buffer/buffer_test.go
index f84d8c66d0082..b3f666fd05d39 100644
--- a/internal/buffer/buffer_test.go
+++ b/internal/buffer/buffer_test.go
@@ -1,6 +1,8 @@
 package buffer
 
 import (
+	"sync"
+	"sync/atomic"
 	"testing"
 
 	"github.com/influxdata/telegraf"
@@ -17,6 +19,107 @@ var metricList = []telegraf.Metric{
 	testutil.TestMetric(8, "mymetric5"),
 }
 
+func makeBench5(b *testing.B, freq, batchSize int) {
+	const k = 1000
+	var wg sync.WaitGroup
+	buf := NewBuffer(10000)
+	m := testutil.TestMetric(1, "mymetric")
+
+	for i := 0; i < b.N; i++ {
+		buf.Add(m, m, m, m, m)
+		if i%(freq*k) == 0 {
+			wg.Add(1)
+			go func() {
+				buf.Batch(batchSize * k)
+				wg.Done()
+			}()
+		}
+	}
+	// Flush
+	buf.Batch(b.N)
+	wg.Wait()
+
+}
+func makeBenchStrict(b *testing.B, freq, batchSize int) {
+	const k = 1000
+	var count uint64
+	var wg sync.WaitGroup
+	buf := NewBuffer(10000)
+	m := testutil.TestMetric(1, "mymetric")
+
+	for i := 0; i < b.N; i++ {
+		buf.Add(m)
+		if i%(freq*k) == 0 {
+			wg.Add(1)
+			go func() {
+				defer wg.Done()
+				l := len(buf.Batch(batchSize * k))
+				atomic.AddUint64(&count, uint64(l))
+			}()
+		}
+	}
+	// Flush
+	wg.Add(1)
+	go func() {
+		l := len(buf.Batch(b.N))
+		atomic.AddUint64(&count, uint64(l))
+		wg.Done()
+	}()
+
+	wg.Wait()
+	if count != uint64(b.N) {
+		b.Errorf("not all metrics came out. 
%d of %d", count, b.N) + } +} +func makeBench(b *testing.B, freq, batchSize int) { + const k = 1000 + var wg sync.WaitGroup + buf := NewBuffer(10000) + m := testutil.TestMetric(1, "mymetric") + + for i := 0; i < b.N; i++ { + buf.Add(m) + if i%(freq*k) == 0 { + wg.Add(1) + go func() { + buf.Batch(batchSize * k) + wg.Done() + }() + } + } + wg.Wait() + // Flush + buf.Batch(b.N) +} + +func BenchmarkBufferBatch5Add(b *testing.B) { + makeBench5(b, 100, 101) +} +func BenchmarkBufferBigInfrequentBatchCatchup(b *testing.B) { + makeBench(b, 100, 101) +} +func BenchmarkBufferOftenBatch(b *testing.B) { + makeBench(b, 1, 1) +} +func BenchmarkBufferAlmostBatch(b *testing.B) { + makeBench(b, 10, 9) +} +func BenchmarkBufferSlowBatch(b *testing.B) { + makeBench(b, 10, 1) +} +func BenchmarkBufferBatchNoDrop(b *testing.B) { + makeBenchStrict(b, 1, 4) +} +func BenchmarkBufferCatchup(b *testing.B) { + buf := NewBuffer(10000) + m := testutil.TestMetric(1, "mymetric") + + for i := 0; i < b.N; i++ { + buf.Add(m) + } + buf.Batch(b.N) +} + func BenchmarkAddMetrics(b *testing.B) { buf := NewBuffer(10000) m := testutil.TestMetric(1, "mymetric") diff --git a/internal/config/aws/credentials.go b/internal/config/aws/credentials.go index b1f57fceb2903..1e4f91b132a3b 100644 --- a/internal/config/aws/credentials.go +++ b/internal/config/aws/credentials.go @@ -9,13 +9,14 @@ import ( ) type CredentialConfig struct { - Region string - AccessKey string - SecretKey string - RoleARN string - Profile string - Filename string - Token string + Region string + AccessKey string + SecretKey string + RoleARN string + Profile string + Filename string + Token string + EndpointURL string } func (c *CredentialConfig) Credentials() client.ConfigProvider { @@ -28,7 +29,8 @@ func (c *CredentialConfig) Credentials() client.ConfigProvider { func (c *CredentialConfig) rootCredentials() client.ConfigProvider { config := &aws.Config{ - Region: aws.String(c.Region), + Region: aws.String(c.Region), + Endpoint: &c.EndpointURL, } if c.AccessKey != "" || c.SecretKey != "" { config.Credentials = credentials.NewStaticCredentials(c.AccessKey, c.SecretKey, c.Token) @@ -42,7 +44,8 @@ func (c *CredentialConfig) rootCredentials() client.ConfigProvider { func (c *CredentialConfig) assumeCredentials() client.ConfigProvider { rootCredentials := c.rootCredentials() config := &aws.Config{ - Region: aws.String(c.Region), + Region: aws.String(c.Region), + Endpoint: &c.EndpointURL, } config.Credentials = stscreds.NewCredentials(rootCredentials, c.RoleARN) return session.New(config) diff --git a/internal/models/running_output.go b/internal/models/running_output.go index 713c28cce6bcb..25576d745c36b 100644 --- a/internal/models/running_output.go +++ b/internal/models/running_output.go @@ -105,12 +105,13 @@ func (ro *RunningOutput) AddMetric(m telegraf.Metric) { tags := m.Tags() fields := m.Fields() t := m.Time() + tp := m.Type() if ok := ro.Config.Filter.Apply(name, fields, tags); !ok { ro.MetricsFiltered.Incr(1) return } // error is not possible if creating from another metric, so ignore. - m, _ = metric.New(name, tags, fields, t) + m, _ = metric.New(name, tags, fields, t, tp) } ro.metrics.Add(m) @@ -119,6 +120,7 @@ func (ro *RunningOutput) AddMetric(m telegraf.Metric) { err := ro.write(batch) if err != nil { ro.failMetrics.Add(batch...) + log.Printf("E! 
Error writing to output [%s]: %v", ro.Name, err) + } } } diff --git a/plugins/inputs/activemq/README.md b/plugins/inputs/activemq/README.md new file mode 100644 index 0000000000000..b44d12d22f07b --- /dev/null +++ b/plugins/inputs/activemq/README.md @@ -0,0 +1,86 @@ +# Telegraf Input Plugin: ActiveMQ + +This plugin gathers queue, topic, and subscriber metrics using the ActiveMQ Console API. + +### Configuration: + +```toml +# Gather queues, topics & subscribers metrics using ActiveMQ Console API +[[inputs.activemq]] + ## Required ActiveMQ Endpoint + # server = "192.168.50.10" + + ## Required ActiveMQ port + # port = 8161 + + ## Credentials for basic HTTP authentication + # username = "admin" + # password = "admin" + + ## Required ActiveMQ webadmin root path + # webadmin = "admin" + + ## Maximum time to receive response. + # response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +``` + +### Measurements & Fields: + +Every effort was made to preserve the names based on the XML response from the ActiveMQ Console API. + +- activemq_queues: + - size + - consumer_count + - enqueue_count + - dequeue_count +- activemq_topics: + - size + - consumer_count + - enqueue_count + - dequeue_count +- activemq_subscribers: + - pending_queue_size + - dispatched_queue_size + - dispatched_counter + - enqueue_counter + - dequeue_counter + +### Tags: + +- activemq_queues: + - name + - source + - port +- activemq_topics: + - name + - source + - port +- activemq_subscribers: + - client_id + - subscription_name + - connection_id + - destination_name + - selector + - active + - source + - port + +### Example Output: + +``` +$ ./telegraf -config telegraf.conf -input-filter activemq -test +activemq_queues,name=sandra,host=88284b2fe51b,source=localhost,port=8161 consumer_count=0i,enqueue_count=0i,dequeue_count=0i,size=0i 1492610703000000000 +activemq_queues,name=Test,host=88284b2fe51b,source=localhost,port=8161 dequeue_count=0i,size=0i,consumer_count=0i,enqueue_count=0i 1492610703000000000 +activemq_topics,name=ActiveMQ.Advisory.MasterBroker\ ,host=88284b2fe51b,source=localhost,port=8161 size=0i,consumer_count=0i,enqueue_count=1i,dequeue_count=0i 1492610703000000000 +activemq_topics,host=88284b2fe51b,name=AAA\,source=localhost,port=8161 size=0i,consumer_count=1i,enqueue_count=0i,dequeue_count=0i 1492610703000000000 +activemq_topics,name=ActiveMQ.Advisory.Topic\,source=localhost,port=8161 ,host=88284b2fe51b enqueue_count=1i,dequeue_count=0i,size=0i,consumer_count=0i 1492610703000000000 +activemq_topics,name=ActiveMQ.Advisory.Queue\,source=localhost,port=8161 ,host=88284b2fe51b size=0i,consumer_count=0i,enqueue_count=2i,dequeue_count=0i 1492610703000000000 +activemq_topics,name=AAAA\ ,host=88284b2fe51b,source=localhost,port=8161 consumer_count=0i,enqueue_count=0i,dequeue_count=0i,size=0i 1492610703000000000 +activemq_subscribers,connection_id=NOTSET,destination_name=AAA,,source=localhost,port=8161,selector=AA,active=no,host=88284b2fe51b,client_id=AAA,subscription_name=AAA pending_queue_size=0i,dispatched_queue_size=0i,dispatched_counter=0i,enqueue_counter=0i,dequeue_counter=0i 1492610703000000000 +``` diff --git a/plugins/inputs/activemq/activemq.go b/plugins/inputs/activemq/activemq.go new file mode 100644 index 0000000000000..5b59730d20d90 --- /dev/null +++ b/plugins/inputs/activemq/activemq.go @@ -0,0 +1,261 @@ +package activemq + +import ( + "encoding/xml" + "fmt" + "io/ioutil" + "net/http" + "strconv" + "strings" + "time" 
+ + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type ActiveMQ struct { + Server string `json:"server"` + Port int `json:"port"` + Username string `json:"username"` + Password string `json:"password"` + Webadmin string `json:"webadmin"` + ResponseTimeout internal.Duration + tls.ClientConfig + + client *http.Client +} + +type Topics struct { + XMLName xml.Name `xml:"topics"` + TopicItems []Topic `xml:"topic"` +} + +type Topic struct { + XMLName xml.Name `xml:"topic"` + Name string `xml:"name,attr"` + Stats Stats `xml:"stats"` +} + +type Subscribers struct { + XMLName xml.Name `xml:"subscribers"` + SubscriberItems []Subscriber `xml:"subscriber"` +} + +type Subscriber struct { + XMLName xml.Name `xml:"subscriber"` + ClientId string `xml:"clientId,attr"` + SubscriptionName string `xml:"subscriptionName,attr"` + ConnectionId string `xml:"connectionId,attr"` + DestinationName string `xml:"destinationName,attr"` + Selector string `xml:"selector,attr"` + Active string `xml:"active,attr"` + Stats Stats `xml:"stats"` +} + +type Queues struct { + XMLName xml.Name `xml:"queues"` + QueueItems []Queue `xml:"queue"` +} + +type Queue struct { + XMLName xml.Name `xml:"queue"` + Name string `xml:"name,attr"` + Stats Stats `xml:"stats"` +} + +type Stats struct { + XMLName xml.Name `xml:"stats"` + Size int `xml:"size,attr"` + ConsumerCount int `xml:"consumerCount,attr"` + EnqueueCount int `xml:"enqueueCount,attr"` + DequeueCount int `xml:"dequeueCount,attr"` + PendingQueueSize int `xml:"pendingQueueSize,attr"` + DispatchedQueueSize int `xml:"dispatchedQueueSize,attr"` + DispatchedCounter int `xml:"dispatchedCounter,attr"` + EnqueueCounter int `xml:"enqueueCounter,attr"` + DequeueCounter int `xml:"dequeueCounter,attr"` +} + +const ( + QUEUES_STATS = "queues" + TOPICS_STATS = "topics" + SUBSCRIBERS_STATS = "subscribers" +) + +var sampleConfig = ` + ## Required ActiveMQ Endpoint + # server = "192.168.50.10" + + ## Required ActiveMQ port + # port = 8161 + + ## Credentials for basic HTTP authentication + # username = "admin" + # password = "admin" + + ## Required ActiveMQ webadmin root path + # webadmin = "admin" + + ## Maximum time to receive response. 
+ # response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + ` + +func (a *ActiveMQ) Description() string { + return "Gather ActiveMQ metrics" +} + +func (a *ActiveMQ) SampleConfig() string { + return sampleConfig +} + +func (a *ActiveMQ) createHttpClient() (*http.Client, error) { + tlsCfg, err := a.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsCfg, + }, + Timeout: a.ResponseTimeout.Duration, + } + + return client, nil +} + +func (a *ActiveMQ) GetMetrics(keyword string) ([]byte, error) { + if a.ResponseTimeout.Duration < time.Second { + a.ResponseTimeout.Duration = time.Second * 5 + } + + if a.client == nil { + client, err := a.createHttpClient() + if err != nil { + return nil, err + } + a.client = client + } + url := fmt.Sprintf("http://%s:%d/%s/xml/%s.jsp", a.Server, a.Port, a.Webadmin, keyword) + + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + + req.SetBasicAuth(a.Username, a.Password) + resp, err := a.client.Do(req) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + return ioutil.ReadAll(resp.Body) +} + +func (a *ActiveMQ) GatherQueuesMetrics(acc telegraf.Accumulator, queues Queues) { + for _, queue := range queues.QueueItems { + records := make(map[string]interface{}) + tags := make(map[string]string) + + tags["name"] = strings.TrimSpace(queue.Name) + tags["source"] = a.Server + tags["port"] = strconv.Itoa(a.Port) + + records["size"] = queue.Stats.Size + records["consumer_count"] = queue.Stats.ConsumerCount + records["enqueue_count"] = queue.Stats.EnqueueCount + records["dequeue_count"] = queue.Stats.DequeueCount + + acc.AddFields("activemq_queues", records, tags) + } +} + +func (a *ActiveMQ) GatherTopicsMetrics(acc telegraf.Accumulator, topics Topics) { + for _, topic := range topics.TopicItems { + records := make(map[string]interface{}) + tags := make(map[string]string) + + tags["name"] = topic.Name + tags["source"] = a.Server + tags["port"] = strconv.Itoa(a.Port) + + records["size"] = topic.Stats.Size + records["consumer_count"] = topic.Stats.ConsumerCount + records["enqueue_count"] = topic.Stats.EnqueueCount + records["dequeue_count"] = topic.Stats.DequeueCount + + acc.AddFields("activemq_topics", records, tags) + } +} + +func (a *ActiveMQ) GatherSubscribersMetrics(acc telegraf.Accumulator, subscribers Subscribers) { + for _, subscriber := range subscribers.SubscriberItems { + records := make(map[string]interface{}) + tags := make(map[string]string) + + tags["client_id"] = subscriber.ClientId + tags["subscription_name"] = subscriber.SubscriptionName + tags["connection_id"] = subscriber.ConnectionId + tags["destination_name"] = subscriber.DestinationName + tags["selector"] = subscriber.Selector + tags["active"] = subscriber.Active + tags["source"] = a.Server + tags["port"] = strconv.Itoa(a.Port) + + records["pending_queue_size"] = subscriber.Stats.PendingQueueSize + records["dispatched_queue_size"] = subscriber.Stats.DispatchedQueueSize + records["dispatched_counter"] = subscriber.Stats.DispatchedCounter + records["enqueue_counter"] = subscriber.Stats.EnqueueCounter + records["dequeue_counter"] = subscriber.Stats.DequeueCounter + + acc.AddFields("activemq_subscribers", records, tags) + } +} + +func (a *ActiveMQ) Gather(acc telegraf.Accumulator) error { + dataQueues, err 
:= a.GetMetrics(QUEUES_STATS) + if err != nil { + return err + } + queues := Queues{} + err = xml.Unmarshal(dataQueues, &queues) + if err != nil { + return err + } + + dataTopics, err := a.GetMetrics(TOPICS_STATS) + if err != nil { + return err + } + topics := Topics{} + err = xml.Unmarshal(dataTopics, &topics) + if err != nil { + return err + } + + dataSubscribers, err := a.GetMetrics(SUBSCRIBERS_STATS) + if err != nil { + return err + } + subscribers := Subscribers{} + err = xml.Unmarshal(dataSubscribers, &subscribers) + if err != nil { + return err + } + + a.GatherQueuesMetrics(acc, queues) + a.GatherTopicsMetrics(acc, topics) + a.GatherSubscribersMetrics(acc, subscribers) + + return nil +} + +func init() { + inputs.Add("activemq", func() telegraf.Input { + return &ActiveMQ{ + Server: "localhost", + Port: 8161, + } + }) +} diff --git a/plugins/inputs/activemq/activemq_test.go b/plugins/inputs/activemq/activemq_test.go new file mode 100644 index 0000000000000..c277af3c5e72c --- /dev/null +++ b/plugins/inputs/activemq/activemq_test.go @@ -0,0 +1,139 @@ +package activemq + +import ( + "encoding/xml" + "testing" + + "github.com/influxdata/telegraf/testutil" +) + +func TestGatherQueuesMetrics(t *testing.T) { + + s := `<queues> +<queue name="sandra"> +<stats size="0" consumerCount="0" enqueueCount="0" dequeueCount="0"/> +<feed> +<atom>queueBrowse/sandra?view=rss&amp;feedType=atom_1.0</atom> +<rss>queueBrowse/sandra?view=rss&amp;feedType=rss_2.0</rss> +</feed> +</queue> +<queue name="Test"> +<stats size="0" consumerCount="0" enqueueCount="0" dequeueCount="0"/> +<feed> +<atom>queueBrowse/Test?view=rss&amp;feedType=atom_1.0</atom> +<rss>queueBrowse/Test?view=rss&amp;feedType=rss_2.0</rss> +</feed> +</queue> +</queues>` + + queues := Queues{} + + xml.Unmarshal([]byte(s), &queues) + + records := make(map[string]interface{}) + tags := make(map[string]string) + + tags["name"] = "Test" + tags["source"] = "localhost" + tags["port"] = "8161" + + records["size"] = 0 + records["consumer_count"] = 0 + records["enqueue_count"] = 0 + records["dequeue_count"] = 0 + + var acc testutil.Accumulator + + activeMQ := new(ActiveMQ) + activeMQ.Server = "localhost" + activeMQ.Port = 8161 + + activeMQ.GatherQueuesMetrics(&acc, queues) + acc.AssertContainsTaggedFields(t, "activemq_queues", records, tags) +} + +func TestGatherTopicsMetrics(t *testing.T) { + + s := `<topics> +<topic name="ActiveMQ.Advisory.MasterBroker "> +<stats size="0" consumerCount="0" enqueueCount="1" dequeueCount="0"/> +</topic> +<topic name="AAA"> +<stats size="0" consumerCount="1" enqueueCount="0" dequeueCount="0"/> +</topic> +<topic name="ActiveMQ.Advisory.Topic"> +<stats size="0" consumerCount="0" enqueueCount="1" dequeueCount="0"/> +</topic> +<topic name="ActiveMQ.Advisory.Queue"> +<stats size="0" consumerCount="0" enqueueCount="2" dequeueCount="0"/> +</topic> +<topic name="AAAA "> +<stats size="0" consumerCount="0" enqueueCount="0" dequeueCount="0"/> +</topic> +</topics>` + + topics := Topics{} + + xml.Unmarshal([]byte(s), &topics) + + records := make(map[string]interface{}) + tags := make(map[string]string) + + tags["name"] = "ActiveMQ.Advisory.MasterBroker " + tags["source"] = "localhost" + tags["port"] = "8161" + + records["size"] = 0 + records["consumer_count"] = 0 + records["enqueue_count"] = 1 + records["dequeue_count"] = 0 + + var acc testutil.Accumulator + + activeMQ := new(ActiveMQ) + activeMQ.Server = "localhost" + activeMQ.Port = 8161 + + activeMQ.GatherTopicsMetrics(&acc, topics) + acc.AssertContainsTaggedFields(t, "activemq_topics", records, tags) +} + +func TestGatherSubscribersMetrics(t *testing.T) { + + s := `<subscribers> +<subscriber clientId="AAA" subscriptionName="AAA" connectionId="NOTSET" destinationName="AAA" selector="AA" active="no"> +<stats pendingQueueSize="0" dispatchedQueueSize="0" dispatchedCounter="0" enqueueCounter="0" dequeueCounter="0"/> +</subscriber> +</subscribers>` + + subscribers := Subscribers{} + + xml.Unmarshal([]byte(s), &subscribers) + + records := make(map[string]interface{}) + tags := make(map[string]string) + + tags["client_id"] = "AAA" + tags["subscription_name"] = "AAA" + tags["connection_id"] = "NOTSET" + tags["destination_name"] = "AAA" + tags["selector"] = "AA" + tags["active"] = "no" + tags["source"] = "localhost" + tags["port"] = "8161" + + records["pending_queue_size"] = 0 + records["dispatched_queue_size"] = 0 + records["dispatched_counter"] = 0 + records["enqueue_counter"] = 0 + records["dequeue_counter"] = 0 + + var acc testutil.Accumulator + + activeMQ := new(ActiveMQ) + activeMQ.Server = "localhost" + activeMQ.Port = 8161 + + activeMQ.GatherSubscribersMetrics(&acc, subscribers) + acc.AssertContainsTaggedFields(t, "activemq_subscribers", records, tags) +} diff --git a/plugins/inputs/all/all.go 
b/plugins/inputs/all/all.go index 8989684e4e29b..ac86fb87985e2 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -1,6 +1,7 @@ package all import ( + _ "github.com/influxdata/telegraf/plugins/inputs/activemq" _ "github.com/influxdata/telegraf/plugins/inputs/aerospike" _ "github.com/influxdata/telegraf/plugins/inputs/amqp_consumer" _ "github.com/influxdata/telegraf/plugins/inputs/apache" @@ -31,6 +32,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/fail2ban" _ "github.com/influxdata/telegraf/plugins/inputs/fibaro" _ "github.com/influxdata/telegraf/plugins/inputs/file" + _ "github.com/influxdata/telegraf/plugins/inputs/filecount" _ "github.com/influxdata/telegraf/plugins/inputs/filestat" _ "github.com/influxdata/telegraf/plugins/inputs/fluentd" _ "github.com/influxdata/telegraf/plugins/inputs/graylog" @@ -83,6 +85,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/opensmtpd" _ "github.com/influxdata/telegraf/plugins/inputs/passenger" _ "github.com/influxdata/telegraf/plugins/inputs/pf" + _ "github.com/influxdata/telegraf/plugins/inputs/pgbouncer" _ "github.com/influxdata/telegraf/plugins/inputs/phpfpm" _ "github.com/influxdata/telegraf/plugins/inputs/ping" _ "github.com/influxdata/telegraf/plugins/inputs/postfix" diff --git a/plugins/inputs/cloudwatch/README.md b/plugins/inputs/cloudwatch/README.md index 88a5b098f1ac2..dfb5bf95ded3b 100644 --- a/plugins/inputs/cloudwatch/README.md +++ b/plugins/inputs/cloudwatch/README.md @@ -35,6 +35,12 @@ API endpoint. In the following order the plugin will attempt to authenticate. #profile = "" #shared_credential_file = "" + ## Endpoint to make request against, the correct endpoint is automatically + ## determined and this option should only be set if you wish to override the + ## default. + ## ex: endpoint_url = "http://localhost:8000" + # endpoint_url = "" + # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all # metrics are made available to the 1 minute period. Some are collected at # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring. diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go index b4f91f745a786..9ba15b6ac9501 100644 --- a/plugins/inputs/cloudwatch/cloudwatch.go +++ b/plugins/inputs/cloudwatch/cloudwatch.go @@ -19,13 +19,14 @@ import ( type ( CloudWatch struct { - Region string `toml:"region"` - AccessKey string `toml:"access_key"` - SecretKey string `toml:"secret_key"` - RoleARN string `toml:"role_arn"` - Profile string `toml:"profile"` - Filename string `toml:"shared_credential_file"` - Token string `toml:"token"` + Region string `toml:"region"` + AccessKey string `toml:"access_key"` + SecretKey string `toml:"secret_key"` + RoleARN string `toml:"role_arn"` + Profile string `toml:"profile"` + Filename string `toml:"shared_credential_file"` + Token string `toml:"token"` + EndpointURL string `toml:"endpoint_url"` Period internal.Duration `toml:"period"` Delay internal.Duration `toml:"delay"` @@ -79,6 +80,12 @@ func (c *CloudWatch) SampleConfig() string { #profile = "" #shared_credential_file = "" + ## Endpoint to make request against, the correct endpoint is automatically + ## determined and this option should only be set if you wish to override the + ## default. + ## ex: endpoint_url = "http://localhost:8000" + # endpoint_url = "" + # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all # metrics are made available to the 1 minute period. 
Some are collected at # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring. @@ -224,13 +231,14 @@ func init() { */ func (c *CloudWatch) initializeCloudWatch() error { credentialConfig := &internalaws.CredentialConfig{ - Region: c.Region, - AccessKey: c.AccessKey, - SecretKey: c.SecretKey, - RoleARN: c.RoleARN, - Profile: c.Profile, - Filename: c.Filename, - Token: c.Token, + Region: c.Region, + AccessKey: c.AccessKey, + SecretKey: c.SecretKey, + RoleARN: c.RoleARN, + Profile: c.Profile, + Filename: c.Filename, + Token: c.Token, + EndpointURL: c.EndpointURL, } configProvider := credentialConfig.Credentials() diff --git a/plugins/inputs/file/README.md b/plugins/inputs/file/README.md index 73a3a2362e0f0..4358b67ad2668 100644 --- a/plugins/inputs/file/README.md +++ b/plugins/inputs/file/README.md @@ -14,7 +14,7 @@ use the [tail input plugin](/plugins/inputs/tail) instead. ## ** as a "super asterisk". ie: ## /var/log/**.log -> recursively find all .log files in /var/log ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log - ## /var/log/apache.log -> only tail the apache log file + ## /var/log/apache.log -> only read the apache log file files = ["/var/log/apache/access.log"] ## Data format to consume. diff --git a/plugins/inputs/file/dev/docker-compose.yml b/plugins/inputs/file/dev/docker-compose.yml index 3c16fca909ebd..efce389f78424 100644 --- a/plugins/inputs/file/dev/docker-compose.yml +++ b/plugins/inputs/file/dev/docker-compose.yml @@ -6,7 +6,7 @@ services: volumes: - ./telegraf.conf:/telegraf.conf - ../../../../telegraf:/telegraf - - ./json_a.log:/var/log/test.log + - ./dev/json_a.log:/var/log/test.log entrypoint: - /telegraf - --config diff --git a/plugins/inputs/file/dev/json_a.log b/plugins/inputs/file/dev/json_a.log deleted file mode 100644 index 0f52e9d1e3b57..0000000000000 --- a/plugins/inputs/file/dev/json_a.log +++ /dev/null @@ -1,14 +0,0 @@ -{ -"parent": { - "child": 3.0, - "ignored_child": "hi" -}, -"ignored_null": null, -"integer": 4, -"list": [3, 4], -"ignored_parent": { - "another_ignored_null": null, - "ignored_string": "hello, world!" -}, -"another_list": [4] -} diff --git a/plugins/inputs/file/testfiles/grok_a.log b/plugins/inputs/file/dev/testfiles/grok_a.log similarity index 100% rename from plugins/inputs/file/testfiles/grok_a.log rename to plugins/inputs/file/dev/testfiles/grok_a.log diff --git a/plugins/inputs/file/testfiles/json_a.log b/plugins/inputs/file/dev/testfiles/json_a.log similarity index 100% rename from plugins/inputs/file/testfiles/json_a.log rename to plugins/inputs/file/dev/testfiles/json_a.log diff --git a/plugins/inputs/file/file.go b/plugins/inputs/file/file.go index 2779561fc2ffb..d6714301eaed2 100644 --- a/plugins/inputs/file/file.go +++ b/plugins/inputs/file/file.go @@ -11,9 +11,8 @@ import ( ) type File struct { - Files []string `toml:"files"` - FromBeginning bool - parser parsers.Parser + Files []string `toml:"files"` + parser parsers.Parser filenames []string } @@ -24,7 +23,7 @@ const sampleConfig = ` ## ** as a "super asterisk". 
ie: ## /var/log/**.log -> recursively find all .log files in /var/log ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log - ## /var/log/apache.log -> only tail the apache log file + ## /var/log/apache.log -> only read the apache log file files = ["/var/log/apache/access.log"] ## The data format to be read from the files @@ -40,7 +39,7 @@ func (f *File) SampleConfig() string { } func (f *File) Description() string { - return "reload and gather from file[s] on telegraf's interval" + return "Reload and gather from file[s] on telegraf's interval." } func (f *File) Gather(acc telegraf.Accumulator) error { diff --git a/plugins/inputs/file/file_test.go b/plugins/inputs/file/file_test.go index 28105664615a1..43322c2e84cf9 100644 --- a/plugins/inputs/file/file_test.go +++ b/plugins/inputs/file/file_test.go @@ -14,26 +14,26 @@ import ( func TestRefreshFilePaths(t *testing.T) { wd, err := os.Getwd() r := File{ - Files: []string{filepath.Join(wd, "testfiles/**.log")}, + Files: []string{filepath.Join(wd, "dev/testfiles/**.log")}, } err = r.refreshFilePaths() require.NoError(t, err) - assert.Equal(t, len(r.filenames), 2) + assert.Equal(t, 2, len(r.filenames)) } func TestJSONParserCompile(t *testing.T) { var acc testutil.Accumulator wd, _ := os.Getwd() r := File{ - Files: []string{filepath.Join(wd, "testfiles/json_a.log")}, + Files: []string{filepath.Join(wd, "dev/testfiles/json_a.log")}, } parserConfig := parsers.Config{ DataFormat: "json", TagKeys: []string{"parent_ignored_child"}, } nParser, err := parsers.NewParser(&parserConfig) - r.parser = nParser assert.NoError(t, err) + r.parser = nParser r.Gather(&acc) assert.Equal(t, map[string]string{"parent_ignored_child": "hi"}, acc.Metrics[0].Tags) @@ -44,7 +44,7 @@ func TestGrokParser(t *testing.T) { wd, _ := os.Getwd() var acc testutil.Accumulator r := File{ - Files: []string{filepath.Join(wd, "testfiles/grok_a.log")}, + Files: []string{filepath.Join(wd, "dev/testfiles/grok_a.log")}, } parserConfig := parsers.Config{ @@ -57,5 +57,5 @@ assert.NoError(t, err) err = r.Gather(&acc) - assert.Equal(t, 2, len(acc.Metrics)) + assert.Equal(t, len(acc.Metrics), 2) } diff --git a/plugins/inputs/filecount/README.md b/plugins/inputs/filecount/README.md new file mode 100644 index 0000000000000..ccec532aaef91 --- /dev/null +++ b/plugins/inputs/filecount/README.md @@ -0,0 +1,49 @@ +# filecount Input Plugin + +Counts files in directories that match certain criteria. + +### Configuration: + +```toml +# Count files in a directory +[[inputs.filecount]] + ## Directory to gather stats about. + directory = "/var/cache/apt/archives" + + ## Only count files that match the name pattern. Defaults to "*". + name = "*.deb" + + ## Count files in subdirectories. Defaults to true. + recursive = false + + ## Only count regular files. Defaults to true. + regular_only = true + + ## Only count files that are at least this size in bytes. If size is + ## a negative number, only count files that are smaller than the + ## absolute value of size. Defaults to 0. + size = 0 + + ## Only count files that have not been touched for at least this + ## duration. If mtime is negative, only count files that have been + ## touched in this duration. Defaults to "0s". 
+ mtime = "0s" +``` + +### Measurements & Fields: + +- filecount + - count (int) + +### Tags: + +- All measurements have the following tags: + - directory (the directory path, as specified in the config) + +### Example Output: + +``` +$ telegraf --config /etc/telegraf/telegraf.conf --input-filter filecount --test +> filecount,directory=/var/cache/apt,host=czernobog count=7i 1530034445000000000 +> filecount,directory=/tmp,host=czernobog count=17i 1530034445000000000 +``` diff --git a/plugins/inputs/filecount/filecount.go b/plugins/inputs/filecount/filecount.go new file mode 100644 index 0000000000000..6041ec7b5aae0 --- /dev/null +++ b/plugins/inputs/filecount/filecount.go @@ -0,0 +1,215 @@ +package filecount + +import ( + "os" + "path/filepath" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" +) + +const sampleConfig = ` + ## Directory to gather stats about. + directory = "/var/cache/apt/archives" + + ## Only count files that match the name pattern. Defaults to "*". + name = "*.deb" + + ## Count files in subdirectories. Defaults to true. + recursive = false + + ## Only count regular files. Defaults to true. + regular_only = true + + ## Only count files that are at least this size in bytes. If size is + ## a negative number, only count files that are smaller than the + ## absolute value of size. Defaults to 0. + size = 0 + + ## Only count files that have not been touched for at least this + ## duration. If mtime is negative, only count files that have been + ## touched in this duration. Defaults to "0s". + mtime = "0s" +` + +type FileCount struct { + Directory string + Name string + Recursive bool + RegularOnly bool + Size int64 + MTime internal.Duration `toml:"mtime"` + fileFilters []fileFilterFunc +} + +type countFunc func(os.FileInfo) +type fileFilterFunc func(os.FileInfo) (bool, error) + +func (_ *FileCount) Description() string { + return "Count files in a directory" +} + +func (_ *FileCount) SampleConfig() string { return sampleConfig } + +func rejectNilFilters(filters []fileFilterFunc) []fileFilterFunc { + filtered := make([]fileFilterFunc, 0, len(filters)) + for _, f := range filters { + if f != nil { + filtered = append(filtered, f) + } + } + return filtered +} + +func (fc *FileCount) nameFilter() fileFilterFunc { + if fc.Name == "*" { + return nil + } + + return func(f os.FileInfo) (bool, error) { + match, err := filepath.Match(fc.Name, f.Name()) + if err != nil { + return false, err + } + return match, nil + } +} + +func (fc *FileCount) regularOnlyFilter() fileFilterFunc { + if !fc.RegularOnly { + return nil + } + + return func(f os.FileInfo) (bool, error) { + return f.Mode().IsRegular(), nil + } +} + +func (fc *FileCount) sizeFilter() fileFilterFunc { + if fc.Size == 0 { + return nil + } + + return func(f os.FileInfo) (bool, error) { + if !f.Mode().IsRegular() { + return false, nil + } + if fc.Size < 0 { + return f.Size() < -fc.Size, nil + } + return f.Size() >= fc.Size, nil + } +} + +func (fc *FileCount) mtimeFilter() fileFilterFunc { + if fc.MTime.Duration == 0 { + return nil + } + + return func(f os.FileInfo) (bool, error) { + age := absDuration(fc.MTime.Duration) + mtime := time.Now().Add(-age) + if fc.MTime.Duration < 0 { + return f.ModTime().After(mtime), nil + } + return f.ModTime().Before(mtime), nil + } +} + +func absDuration(x time.Duration) time.Duration { + if x < 0 { + return -x + } + return x +} + +func count(basedir string, recursive bool, countFn countFunc) error { + walkFn 
:= func(path string, file os.FileInfo, err error) error { + // Propagate walk errors; file is nil when err is non-nil. + if err != nil { + return err + } + if path == basedir { + return nil + } + countFn(file) + if !recursive && file.IsDir() { + return filepath.SkipDir + } + return nil + } + return filepath.Walk(basedir, walkFn) +} + +func (fc *FileCount) initFileFilters() { + filters := []fileFilterFunc{ + fc.nameFilter(), + fc.regularOnlyFilter(), + fc.sizeFilter(), + fc.mtimeFilter(), + } + fc.fileFilters = rejectNilFilters(filters) +} + +func (fc *FileCount) filter(file os.FileInfo) (bool, error) { + if fc.fileFilters == nil { + fc.initFileFilters() + } + + for _, fileFilter := range fc.fileFilters { + match, err := fileFilter(file) + if err != nil { + return false, err + } + if !match { + return false, nil + } + } + + return true, nil +} + +func (fc *FileCount) Gather(acc telegraf.Accumulator) error { + numFiles := int64(0) + countFn := func(f os.FileInfo) { + match, err := fc.filter(f) + if err != nil { + acc.AddError(err) + return + } + if !match { + return + } + numFiles++ + } + err := count(fc.Directory, fc.Recursive, countFn) + if err != nil { + acc.AddError(err) + } + + acc.AddFields("filecount", + map[string]interface{}{ + "count": numFiles, + }, + map[string]string{ + "directory": fc.Directory, + }) + + return nil +} + +func NewFileCount() *FileCount { + return &FileCount{ + Directory: "", + Name: "*", + Recursive: true, + RegularOnly: true, + Size: 0, + MTime: internal.Duration{Duration: 0}, + fileFilters: nil, + } +} + +func init() { + inputs.Add("filecount", func() telegraf.Input { + return NewFileCount() + }) +} diff --git a/plugins/inputs/filecount/filecount_test.go b/plugins/inputs/filecount/filecount_test.go new file mode 100644 index 0000000000000..294a8b9653720 --- /dev/null +++ b/plugins/inputs/filecount/filecount_test.go @@ -0,0 +1,99 @@ +package filecount + +import ( + "os" + "path/filepath" + "runtime" + "strings" + "testing" + "time" + + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestNoFilters(t *testing.T) { + fc := getNoFilterFileCount() + matches := []string{"foo", "bar", "baz", "qux", + "subdir/", "subdir/quux", "subdir/quuz"} + require.True(t, fileCountEquals(fc, len(matches))) +} + +func TestNameFilter(t *testing.T) { + fc := getNoFilterFileCount() + fc.Name = "ba*" + matches := []string{"bar", "baz"} + require.True(t, fileCountEquals(fc, len(matches))) +} + +func TestNonRecursive(t *testing.T) { + fc := getNoFilterFileCount() + fc.Recursive = false + matches := []string{"foo", "bar", "baz", "qux", "subdir"} + require.True(t, fileCountEquals(fc, len(matches))) +} + +func TestRegularOnlyFilter(t *testing.T) { + fc := getNoFilterFileCount() + fc.RegularOnly = true + matches := []string{ + "foo", "bar", "baz", "qux", "subdir/quux", "subdir/quuz", + } + require.True(t, fileCountEquals(fc, len(matches))) +} + +func TestSizeFilter(t *testing.T) { + fc := getNoFilterFileCount() + fc.Size = -100 + matches := []string{"foo", "bar", "baz", + "subdir/quux", "subdir/quuz"} + require.True(t, fileCountEquals(fc, len(matches))) + + fc.Size = 100 + matches = []string{"qux"} + require.True(t, fileCountEquals(fc, len(matches))) +} + +func TestMTimeFilter(t *testing.T) { + oldFile := filepath.Join(getTestdataDir(), "baz") + mtime := time.Date(1979, time.December, 14, 18, 25, 5, 0, time.UTC) + if err := os.Chtimes(oldFile, mtime, mtime); err != nil { + t.Skip("skipping mtime filter test.") + } + fileAge := time.Since(mtime) - (60 * time.Second) + + fc := getNoFilterFileCount() + 
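// A negative MTime counts only files touched within the window, so every file except the backdated "baz" should match here. +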
fc.MTime = internal.Duration{Duration: -fileAge} + matches := []string{"foo", "bar", "qux", + "subdir/", "subdir/quux", "subdir/quuz"} + require.True(t, fileCountEquals(fc, len(matches))) + + fc.MTime = internal.Duration{Duration: fileAge} + matches = []string{"baz"} + require.True(t, fileCountEquals(fc, len(matches))) +} + +func getNoFilterFileCount() FileCount { + return FileCount{ + Directory: getTestdataDir(), + Name: "*", + Recursive: true, + RegularOnly: false, + Size: 0, + MTime: internal.Duration{Duration: 0}, + fileFilters: nil, + } +} + +func getTestdataDir() string { + _, filename, _, _ := runtime.Caller(1) + return strings.Replace(filename, "filecount_test.go", "testdata/", 1) +} + +func fileCountEquals(fc FileCount, expectedCount int) bool { + tags := map[string]string{"directory": getTestdataDir()} + acc := testutil.Accumulator{} + acc.GatherError(fc.Gather) + return acc.HasPoint("filecount", tags, "count", int64(expectedCount)) +} diff --git a/plugins/inputs/filecount/testdata/bar b/plugins/inputs/filecount/testdata/bar new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/inputs/filecount/testdata/baz b/plugins/inputs/filecount/testdata/baz new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/inputs/filecount/testdata/foo b/plugins/inputs/filecount/testdata/foo new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/inputs/filecount/testdata/qux b/plugins/inputs/filecount/testdata/qux new file mode 100644 index 0000000000000..c7288f23d2239 --- /dev/null +++ b/plugins/inputs/filecount/testdata/qux @@ -0,0 +1,7 @@ +Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do +eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad +minim veniam, quis nostrud exercitation ullamco laboris nisi ut +aliquip ex ea commodo consequat. Duis aute irure dolor in +reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla +pariatur. Excepteur sint occaecat cupidatat non proident, sunt in +culpa qui officia deserunt mollit anim id est laborum. 
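Since the size and mtime options both flip meaning when negative, here is a minimal, self-contained sketch of the two predicates (a restatement of sizeFilter and mtimeFilter from filecount.go above, not plugin code; the path in main is purely illustrative):

```go
package main

import (
	"fmt"
	"os"
	"time"
)

// sizeMatch mirrors sizeFilter above: a negative threshold counts regular
// files strictly smaller than its absolute value; a positive one counts
// files at least that large.
func sizeMatch(info os.FileInfo, size int64) bool {
	if !info.Mode().IsRegular() {
		return false
	}
	if size < 0 {
		return info.Size() < -size
	}
	return info.Size() >= size
}

// mtimeMatch mirrors mtimeFilter above: a negative duration counts files
// touched within the window; a positive one counts files untouched for at
// least that long.
func mtimeMatch(info os.FileInfo, mtime time.Duration) bool {
	age := mtime
	if age < 0 {
		age = -age
	}
	cutoff := time.Now().Add(-age)
	if mtime < 0 {
		return info.ModTime().After(cutoff)
	}
	return info.ModTime().Before(cutoff)
}

func main() {
	// "/etc/hosts" is only an example; any existing file works.
	info, err := os.Stat("/etc/hosts")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(sizeMatch(info, -100))        // smaller than 100 bytes?
	fmt.Println(mtimeMatch(info, -time.Hour)) // modified in the last hour?
}
```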
diff --git a/plugins/inputs/filecount/testdata/subdir/quux b/plugins/inputs/filecount/testdata/subdir/quux new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/inputs/filecount/testdata/subdir/quuz b/plugins/inputs/filecount/testdata/subdir/quuz new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/inputs/interrupts/interrupts.go b/plugins/inputs/interrupts/interrupts.go index 75cbf3be13917..30b7ee182d413 100644 --- a/plugins/inputs/interrupts/interrupts.go +++ b/plugins/inputs/interrupts/interrupts.go @@ -3,12 +3,13 @@ package interrupts import ( "bufio" "fmt" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/inputs" "io" "os" "strconv" "strings" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" ) type Interrupts struct{} @@ -50,6 +51,8 @@ func parseInterrupts(r io.Reader) ([]IRQ, error) { } cpucount = len(cpus) } + +scan: for scanner.Scan() { fields := strings.Fields(scanner.Text()) if !strings.HasSuffix(fields[0], ":") { @@ -62,7 +65,7 @@ func parseInterrupts(r io.Reader) ([]IRQ, error) { if i < len(irqvals) { irqval, err := strconv.ParseInt(irqvals[i], 10, 64) if err != nil { - return irqs, fmt.Errorf("Unable to parse %q from %q: %s", irqvals[i], scanner.Text(), err) + continue scan } irq.Cpus = append(irq.Cpus, irqval) } diff --git a/plugins/inputs/interrupts/interrupts_test.go b/plugins/inputs/interrupts/interrupts_test.go index 6c76c8504c1c4..cf1dc949e359f 100644 --- a/plugins/inputs/interrupts/interrupts_test.go +++ b/plugins/inputs/interrupts/interrupts_test.go @@ -2,9 +2,10 @@ package interrupts import ( "bytes" + "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "testing" ) func TestParseInterrupts(t *testing.T) { @@ -58,3 +59,132 @@ TASKLET: 205 0` } } } + +// Tests #4470 +func TestParseInterruptsBad(t *testing.T) { + interruptStr := ` CPU0 CPU1 CPU2 CPU3 + 16: 0 0 0 0 bcm2836-timer 0 Edge arch_timer + 17: 127224250 118424219 127224437 117885416 bcm2836-timer 1 Edge arch_timer + 21: 0 0 0 0 bcm2836-pmu 9 Edge arm-pmu + 23: 1549514 0 0 0 ARMCTRL-level 1 Edge 3f00b880.mailbox + 24: 2 0 0 0 ARMCTRL-level 2 Edge VCHIQ doorbell + 46: 0 0 0 0 ARMCTRL-level 48 Edge bcm2708_fb dma + 48: 0 0 0 0 ARMCTRL-level 50 Edge DMA IRQ + 50: 0 0 0 0 ARMCTRL-level 52 Edge DMA IRQ + 51: 208 0 0 0 ARMCTRL-level 53 Edge DMA IRQ + 54: 883002 0 0 0 ARMCTRL-level 56 Edge DMA IRQ + 59: 0 0 0 0 ARMCTRL-level 61 Edge bcm2835-auxirq + 62: 521451447 0 0 0 ARMCTRL-level 64 Edge dwc_otg, dwc_otg_pcd, dwc_otg_hcd:usb1 + 86: 857597 0 0 0 ARMCTRL-level 88 Edge mmc0 + 87: 4938 0 0 0 ARMCTRL-level 89 Edge uart-pl011 + 92: 5669 0 0 0 ARMCTRL-level 94 Edge mmc1 + FIQ: usb_fiq + IPI0: 0 0 0 0 CPU wakeup interrupts + IPI1: 0 0 0 0 Timer broadcast interrupts + IPI2: 23564958 23464876 23531165 23040826 Rescheduling interrupts + IPI3: 148438 639704 644266 588150 Function call interrupts + IPI4: 0 0 0 0 CPU stop interrupts + IPI5: 4348149 1843985 3819457 1822877 IRQ work interrupts + IPI6: 0 0 0 0 completion interrupts` + f := bytes.NewBufferString(interruptStr) + parsed := []IRQ{ + IRQ{ + ID: "16", Type: "bcm2836-timer", Device: "0 Edge arch_timer", + Cpus: []int64{0, 0, 0, 0}, + }, + IRQ{ + ID: "17", Type: "bcm2836-timer", Device: "1 Edge arch_timer", + Cpus: []int64{127224250, 118424219, 127224437, 117885416}, Total: 490758322, + }, + IRQ{ + ID: "21", Type: "bcm2836-pmu", Device: "9 Edge arm-pmu", + Cpus: []int64{0, 0, 0, 0}, + }, + IRQ{ + ID: "23", Type: "ARMCTRL-level", 
Device: "1 Edge 3f00b880.mailbox", + Cpus: []int64{1549514, 0, 0, 0}, Total: 1549514, + }, + IRQ{ + ID: "24", Type: "ARMCTRL-level", Device: "2 Edge VCHIQ doorbell", + Cpus: []int64{2, 0, 0, 0}, Total: 2, + }, + IRQ{ + ID: "46", Type: "ARMCTRL-level", Device: "48 Edge bcm2708_fb dma", + Cpus: []int64{0, 0, 0, 0}, + }, + IRQ{ + ID: "48", Type: "ARMCTRL-level", Device: "50 Edge DMA IRQ", + Cpus: []int64{0, 0, 0, 0}, + }, + IRQ{ + ID: "50", Type: "ARMCTRL-level", Device: "52 Edge DMA IRQ", + Cpus: []int64{0, 0, 0, 0}, + }, + IRQ{ + ID: "51", Type: "ARMCTRL-level", Device: "53 Edge DMA IRQ", + Cpus: []int64{208, 0, 0, 0}, Total: 208, + }, + IRQ{ + ID: "54", Type: "ARMCTRL-level", Device: "56 Edge DMA IRQ", + Cpus: []int64{883002, 0, 0, 0}, Total: 883002, + }, + IRQ{ + ID: "59", Type: "ARMCTRL-level", Device: "61 Edge bcm2835-auxirq", + Cpus: []int64{0, 0, 0, 0}, + }, + IRQ{ + ID: "62", Type: "ARMCTRL-level", Device: "64 Edge dwc_otg, dwc_otg_pcd, dwc_otg_hcd:usb1", + Cpus: []int64{521451447, 0, 0, 0}, Total: 521451447, + }, + IRQ{ + ID: "86", Type: "ARMCTRL-level", Device: "88 Edge mmc0", + Cpus: []int64{857597, 0, 0, 0}, Total: 857597, + }, + IRQ{ + ID: "87", Type: "ARMCTRL-level", Device: "89 Edge uart-pl011", + Cpus: []int64{4938, 0, 0, 0}, Total: 4938, + }, + IRQ{ + ID: "92", Type: "ARMCTRL-level", Device: "94 Edge mmc1", + Cpus: []int64{5669, 0, 0, 0}, Total: 5669, + }, + IRQ{ + ID: "IPI0", Type: "CPU wakeup interrupts", + Cpus: []int64{0, 0, 0, 0}, + }, + IRQ{ + ID: "IPI1", Type: "Timer broadcast interrupts", + Cpus: []int64{0, 0, 0, 0}, + }, + IRQ{ + ID: "IPI2", Type: "Rescheduling interrupts", + Cpus: []int64{23564958, 23464876, 23531165, 23040826}, Total: 93601825, + }, + IRQ{ + ID: "IPI3", Type: "Function call interrupts", + Cpus: []int64{148438, 639704, 644266, 588150}, Total: 2020558, + }, + IRQ{ + ID: "IPI4", Type: "CPU stop interrupts", + Cpus: []int64{0, 0, 0, 0}, + }, + IRQ{ + ID: "IPI5", Type: "IRQ work interrupts", + Cpus: []int64{4348149, 1843985, 3819457, 1822877}, Total: 11834468, + }, + IRQ{ + ID: "IPI6", Type: "completion interrupts", + Cpus: []int64{0, 0, 0, 0}, + }, + } + got, err := parseInterrupts(f) + require.Equal(t, nil, err) + require.NotEqual(t, 0, len(got)) + require.Equal(t, len(got), len(parsed)) + for i := 0; i < len(parsed); i++ { + assert.Equal(t, parsed[i], got[i]) + for k := 0; k < len(parsed[i].Cpus); k++ { + assert.Equal(t, parsed[i].Cpus[k], got[i].Cpus[k]) + } + } +} diff --git a/plugins/inputs/ipmi_sensor/README.md b/plugins/inputs/ipmi_sensor/README.md index 74cfe3bc5eb98..fb2e8f26e0c07 100644 --- a/plugins/inputs/ipmi_sensor/README.md +++ b/plugins/inputs/ipmi_sensor/README.md @@ -8,6 +8,10 @@ If no servers are specified, the plugin will query the local machine sensor stat ``` ipmitool sdr ``` +or with the version 2 schema: +``` +ipmitool sdr elist +``` When one or more servers are specified, the plugin will use the following command to collect remote host sensor stats: @@ -41,19 +45,36 @@ ipmitool -I lan -H SERVER -U USERID -P PASSW0RD sdr ## Timeout for the ipmitool command to complete. Default is 20 seconds. 
timeout = "20s" + + ## Schema Version: (Optional, defaults to version 1) + metric_version = 2 ``` ### Measurements +Version 1 schema: - ipmi_sensor: - tags: - name - unit + - host - server (only when retrieving stats from remote servers) - fields: - - status (int) + - status (int, 1=ok status_code/0=anything else) - value (float) +Version 2 schema: +- ipmi_sensor: + - tags: + - name + - entity_id (can help uniquify duplicate names) + - status_code (two letter code from IPMI documentation) + - status_desc (extended status description field) + - unit (only on analog values) + - host + - server (only when retrieving stats from remote) + - fields: + - value (float) #### Permissions @@ -68,24 +89,36 @@ KERNEL=="ipmi*", MODE="660", GROUP="telegraf" ### Example Output +#### Version 1 Schema When retrieving stats from a remote server: ``` -ipmi_sensor,server=10.20.2.203,unit=degrees_c,name=ambient_temp status=1i,value=20 1458488465012559455 -ipmi_sensor,server=10.20.2.203,unit=feet,name=altitude status=1i,value=80 1458488465012688613 -ipmi_sensor,server=10.20.2.203,unit=watts,name=avg_power status=1i,value=220 1458488465012776511 -ipmi_sensor,server=10.20.2.203,unit=volts,name=planar_3.3v status=1i,value=3.28 1458488465012861875 -ipmi_sensor,server=10.20.2.203,unit=volts,name=planar_vbat status=1i,value=3.04 1458488465013072508 -ipmi_sensor,server=10.20.2.203,unit=rpm,name=fan_1a_tach status=1i,value=2610 1458488465013137932 -ipmi_sensor,server=10.20.2.203,unit=rpm,name=fan_1b_tach status=1i,value=1775 1458488465013279896 +ipmi_sensor,server=10.20.2.203,name=uid_light value=0,status=1i 1517125513000000000 +ipmi_sensor,server=10.20.2.203,name=sys._health_led status=1i,value=0 1517125513000000000 +ipmi_sensor,server=10.20.2.203,name=power_supply_1,unit=watts status=1i,value=110 1517125513000000000 +ipmi_sensor,server=10.20.2.203,name=power_supply_2,unit=watts status=1i,value=120 1517125513000000000 +ipmi_sensor,server=10.20.2.203,name=power_supplies value=0,status=1i 1517125513000000000 +ipmi_sensor,server=10.20.2.203,name=fan_1,unit=percent status=1i,value=43.12 1517125513000000000 ``` + +When retrieving stats from the local machine (no server specified): +``` +ipmi_sensor,name=uid_light value=0,status=1i 1517125513000000000 +ipmi_sensor,name=sys._health_led status=1i,value=0 1517125513000000000 +ipmi_sensor,name=power_supply_1,unit=watts status=1i,value=110 1517125513000000000 +ipmi_sensor,name=power_supply_2,unit=watts status=1i,value=120 1517125513000000000 +ipmi_sensor,name=power_supplies value=0,status=1i 1517125513000000000 +ipmi_sensor,name=fan_1,unit=percent status=1i,value=43.12 1517125513000000000 +``` + +#### Version 2 Schema + When retrieving stats from the local machine (no server specified): ``` -ipmi_sensor,unit=degrees_c,name=ambient_temp status=1i,value=20 1458488465012559455 -ipmi_sensor,unit=feet,name=altitude status=1i,value=80 1458488465012688613 -ipmi_sensor,unit=watts,name=avg_power status=1i,value=220 1458488465012776511 -ipmi_sensor,unit=volts,name=planar_3.3v status=1i,value=3.28 1458488465012861875 -ipmi_sensor,unit=volts,name=planar_vbat status=1i,value=3.04 1458488465013072508 -ipmi_sensor,unit=rpm,name=fan_1a_tach status=1i,value=2610 1458488465013137932 -ipmi_sensor,unit=rpm,name=fan_1b_tach status=1i,value=1775 1458488465013279896 +ipmi_sensor,name=uid_light,entity_id=23.1,status_code=ok,status_desc=ok value=0 1517125474000000000 +ipmi_sensor,name=sys._health_led,entity_id=23.2,status_code=ok,status_desc=ok value=0 1517125474000000000 
+ipmi_sensor,entity_id=10.1,name=power_supply_1,status_code=ok,status_desc=presence_detected,unit=watts value=110 1517125474000000000 +ipmi_sensor,name=power_supply_2,entity_id=10.2,status_code=ok,unit=watts,status_desc=presence_detected value=125 1517125474000000000 +ipmi_sensor,name=power_supplies,entity_id=10.3,status_code=ok,status_desc=fully_redundant value=0 1517125474000000000 +ipmi_sensor,entity_id=7.1,name=fan_1,status_code=ok,status_desc=transition_to_running,unit=percent value=43.12 1517125474000000000 ``` diff --git a/plugins/inputs/ipmi_sensor/ipmi.go b/plugins/inputs/ipmi_sensor/ipmi.go index ee99b0a3d15fe..65506e11835c7 100644 --- a/plugins/inputs/ipmi_sensor/ipmi.go +++ b/plugins/inputs/ipmi_sensor/ipmi.go @@ -1,8 +1,11 @@ package ipmi_sensor import ( + "bufio" + "bytes" "fmt" "os/exec" + "regexp" "strconv" "strings" "sync" @@ -14,14 +17,20 @@ import ( ) var ( - execCommand = exec.Command // execCommand is used to mock commands in tests. + execCommand = exec.Command // execCommand is used to mock commands in tests. + re_v1_parse_line = regexp.MustCompile(`^(?P<name>[^|]*)\|(?P<description>[^|]*)\|(?P<status_code>.*)`) + re_v2_parse_line = regexp.MustCompile(`^(?P<name>[^|]*)\|[^|]+\|(?P<status_code>[^|]*)\|(?P<entity_id>[^|]*)\|(?:(?P<description>[^|]+))?`) + re_v2_parse_description = regexp.MustCompile(`^(?P<analogValue>[0-9.]+)\s(?P<analogUnit>.*)|(?P<status>.+)|^$`) + re_v2_parse_unit = regexp.MustCompile(`^(?P<realAnalogUnit>[^,]+)(?:,\s*(?P<statusDesc>.*))?`) ) +// Ipmi stores the configuration values for the ipmi_sensor input plugin type Ipmi struct { - Path string - Privilege string - Servers []string - Timeout internal.Duration + Path string + Privilege string + Servers []string + Timeout internal.Duration + MetricVersion int } var sampleConfig = ` @@ -46,16 +55,22 @@ var sampleConfig = ` ## Timeout for the ipmitool command to complete timeout = "20s" + + ## Schema Version: (Optional, defaults to version 1) + metric_version = 2 ` +// SampleConfig returns the documentation about the sample configuration func (m *Ipmi) SampleConfig() string { return sampleConfig } +// Description returns a basic description for the plugin functions func (m *Ipmi) Description() string { return "Read metrics from the bare metal servers via IPMI" } +// Gather is the main execution function for the plugin func (m *Ipmi) Gather(acc telegraf.Accumulator) error { if len(m.Path) == 0 { return fmt.Errorf("ipmitool not found: verify that ipmitool is installed and that ipmitool is in your PATH") @@ -93,23 +108,33 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error { opts = conn.options() } opts = append(opts, "sdr") + if m.MetricVersion == 2 { + opts = append(opts, "elist") + } cmd := execCommand(m.Path, opts...) 
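+ // With metric_version = 2, the "elist" argument appended above makes ipmitool emit the extended five-column format handled by parseV2; the plain three-column "sdr" output is handled by parseV1.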
out, err := internal.CombinedOutputTimeout(cmd, m.Timeout.Duration) + timestamp := time.Now() if err != nil { return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out)) } + if m.MetricVersion == 2 { + return parseV2(acc, hostname, out, timestamp) + } + return parseV1(acc, hostname, out, timestamp) +} +func parseV1(acc telegraf.Accumulator, hostname string, cmdOut []byte, measured_at time.Time) error { // each line will look something like // Planar VBAT | 3.05 Volts | ok - lines := strings.Split(string(out), "\n") - for i := 0; i < len(lines); i++ { - vals := strings.Split(lines[i], "|") - if len(vals) != 3 { + scanner := bufio.NewScanner(bytes.NewReader(cmdOut)) + for scanner.Scan() { + ipmiFields := extractFieldsFromRegex(re_v1_parse_line, scanner.Text()) + if len(ipmiFields) != 3 { continue } tags := map[string]string{ - "name": transform(vals[0]), + "name": transform(ipmiFields["name"]), } // tag the server if we have one @@ -118,18 +143,20 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error { } fields := make(map[string]interface{}) - if strings.EqualFold("ok", trim(vals[2])) { + if strings.EqualFold("ok", trim(ipmiFields["status_code"])) { fields["status"] = 1 } else { fields["status"] = 0 } - val1 := trim(vals[1]) - - if strings.Index(val1, " ") > 0 { + if strings.Index(ipmiFields["description"], " ") > 0 { // split middle column into value and unit - valunit := strings.SplitN(val1, " ", 2) - fields["value"] = Atofloat(valunit[0]) + valunit := strings.SplitN(ipmiFields["description"], " ", 2) + var err error + fields["value"], err = aToFloat(valunit[0]) + if err != nil { + continue + } if len(valunit) > 1 { tags["unit"] = transform(valunit[1]) } @@ -137,19 +164,85 @@ ... - acc.AddFields("ipmi_sensor", fields, tags, time.Now()) + acc.AddFields("ipmi_sensor", fields, tags, measured_at) } - return nil + return scanner.Err() } -func Atofloat(val string) float64 { +func parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measured_at time.Time) error { + // each line will look something like + // CMOS Battery | 65h | ok | 7.1 | + // Temp | 0Eh | ok | 3.1 | 55 degrees C + // Drive 0 | A0h | ok | 7.1 | Drive Present + scanner := bufio.NewScanner(bytes.NewReader(cmdOut)) + for scanner.Scan() { + ipmiFields := extractFieldsFromRegex(re_v2_parse_line, scanner.Text()) + if len(ipmiFields) < 3 || len(ipmiFields) > 4 { + continue + } + + tags := map[string]string{ + "name": transform(ipmiFields["name"]), + } + + // tag the server if we have one + if hostname != "" { + tags["server"] = hostname + } + tags["entity_id"] = transform(ipmiFields["entity_id"]) + tags["status_code"] = trim(ipmiFields["status_code"]) + fields := make(map[string]interface{}) + descriptionResults := extractFieldsFromRegex(re_v2_parse_description, trim(ipmiFields["description"])) + // This is an analog value with a unit + if descriptionResults["analogValue"] != "" && len(descriptionResults["analogUnit"]) >= 1 { + var err error + fields["value"], err = aToFloat(descriptionResults["analogValue"]) + if err != nil { + continue + } + // Some implementations add an extra status to their analog units + unitResults := extractFieldsFromRegex(re_v2_parse_unit, descriptionResults["analogUnit"]) + tags["unit"] = transform(unitResults["realAnalogUnit"]) + if unitResults["statusDesc"] != "" { + tags["status_desc"] = transform(unitResults["statusDesc"]) + } + } else { + // 
This is a status value + fields["value"] = 0.0 + // Extended status descriptions aren't required, in which case for consistency re-use the status code + if descriptionResults["status"] != "" { + tags["status_desc"] = transform(descriptionResults["status"]) + } else { + tags["status_desc"] = transform(ipmiFields["status_code"]) + } + } + + acc.AddFields("ipmi_sensor", fields, tags, measured_at) + } + + return scanner.Err() +} + +// extractFieldsFromRegex consumes a regex with named capture groups and returns a kvp map of strings with the results +func extractFieldsFromRegex(re *regexp.Regexp, input string) map[string]string { + submatches := re.FindStringSubmatch(input) + results := make(map[string]string) + for i, name := range re.SubexpNames() { + if name != input && name != "" && input != "" { + results[name] = trim(submatches[i]) + } + } + return results +} + +// aToFloat converts string representations of numbers to float64 values +func aToFloat(val string) (float64, error) { f, err := strconv.ParseFloat(val, 64) if err != nil { - return 0.0 - } else { - return f + return 0.0, err } + return f, nil } func trim(s string) string { diff --git a/plugins/inputs/ipmi_sensor/ipmi_test.go b/plugins/inputs/ipmi_sensor/ipmi_test.go index 3d45f2fa843b0..d781ce7b51d25 100644 --- a/plugins/inputs/ipmi_sensor/ipmi_test.go +++ b/plugins/inputs/ipmi_sensor/ipmi_test.go @@ -28,7 +28,7 @@ func TestGather(t *testing.T) { require.NoError(t, err) - assert.Equal(t, acc.NFields(), 266, "non-numeric measurements should be ignored") + assert.Equal(t, acc.NFields(), 262, "non-numeric measurements should be ignored") conn := NewConnection(i.Servers[0], i.Privilege) assert.Equal(t, "USERID", conn.Username) @@ -127,6 +127,7 @@ func TestGather(t *testing.T) { } err = acc.GatherError(i.Gather) + require.NoError(t, err) var testsWithoutServer = []struct { fields map[string]interface{} @@ -378,3 +379,196 @@ OS RealTime Mod | 0x00 | ok } os.Exit(0) } + +func TestGatherV2(t *testing.T) { + i := &Ipmi{ + Servers: []string{"USERID:PASSW0RD@lan(192.168.1.1)"}, + Path: "ipmitool", + Privilege: "USER", + Timeout: internal.Duration{Duration: time.Second * 5}, + MetricVersion: 2, + } + // overwriting exec commands with mock commands + execCommand = fakeExecCommandV2 + var acc testutil.Accumulator + + err := acc.GatherError(i.Gather) + + require.NoError(t, err) + + conn := NewConnection(i.Servers[0], i.Privilege) + assert.Equal(t, "USERID", conn.Username) + assert.Equal(t, "lan", conn.Interface) + + var testsWithServer = []struct { + fields map[string]interface{} + tags map[string]string + }{ + //SEL | 72h | ns | 7.1 | No Reading + { + map[string]interface{}{ + "value": float64(0), + }, + map[string]string{ + "name": "sel", + "entity_id": "7.1", + "status_code": "ns", + "status_desc": "no_reading", + "server": "192.168.1.1", + }, + }, + } + + for _, test := range testsWithServer { + acc.AssertContainsTaggedFields(t, "ipmi_sensor", test.fields, test.tags) + } + + i = &Ipmi{ + Path: "ipmitool", + Timeout: internal.Duration{Duration: time.Second * 5}, + MetricVersion: 2, + } + + err = acc.GatherError(i.Gather) + require.NoError(t, err) + + var testsWithoutServer = []struct { + fields map[string]interface{} + tags map[string]string + }{ + //SEL | 72h | ns | 7.1 | No Reading + { + map[string]interface{}{ + "value": float64(0), + }, + map[string]string{ + "name": "sel", + "entity_id": "7.1", + "status_code": "ns", + "status_desc": "no_reading", + }, + }, + //Intrusion | 73h | ok | 7.1 | + { + map[string]interface{}{ + "value": 
float64(0), + }, + map[string]string{ + "name": "intrusion", + "entity_id": "7.1", + "status_code": "ok", + "status_desc": "ok", + }, + }, + //Fan1 | 30h | ok | 7.1 | 5040 RPM + { + map[string]interface{}{ + "value": float64(5040), + }, + map[string]string{ + "name": "fan1", + "entity_id": "7.1", + "status_code": "ok", + "unit": "rpm", + }, + }, + //Inlet Temp | 04h | ok | 7.1 | 25 degrees C + { + map[string]interface{}{ + "value": float64(25), + }, + map[string]string{ + "name": "inlet_temp", + "entity_id": "7.1", + "status_code": "ok", + "unit": "degrees_c", + }, + }, + //USB Cable Pres | 50h | ok | 7.1 | Connected + { + map[string]interface{}{ + "value": float64(0), + }, + map[string]string{ + "name": "usb_cable_pres", + "entity_id": "7.1", + "status_code": "ok", + "status_desc": "connected", + }, + }, + //Current 1 | 6Ah | ok | 10.1 | 7.20 Amps + { + map[string]interface{}{ + "value": float64(7.2), + }, + map[string]string{ + "name": "current_1", + "entity_id": "10.1", + "status_code": "ok", + "unit": "amps", + }, + }, + //Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected + { + map[string]interface{}{ + "value": float64(110), + }, + map[string]string{ + "name": "power_supply_1", + "entity_id": "10.1", + "status_code": "ok", + "unit": "watts", + "status_desc": "presence_detected", + }, + }, + } + + for _, test := range testsWithoutServer { + acc.AssertContainsTaggedFields(t, "ipmi_sensor", test.fields, test.tags) + } +} + +// fakeExecCommandV2 is a helper function that mocks +// the exec.Command call (and calls the test binary) +func fakeExecCommandV2(command string, args ...string) *exec.Cmd { + cs := []string{"-test.run=TestHelperProcessV2", "--", command} + cs = append(cs, args...) + cmd := exec.Command(os.Args[0], cs...) + cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"} + return cmd +} + +// TestHelperProcessV2 isn't a real test. It's used to mock exec.Command. +// For example, if you run: +// GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcessV2 -- ipmitool sdr elist +// it returns the mock data below. +func TestHelperProcessV2(t *testing.T) { + if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { + return + } + + // Curated list of use cases instead of full dumps + mockData := `SEL | 72h | ns | 7.1 | No Reading +Intrusion | 73h | ok | 7.1 | +Fan1 | 30h | ok | 7.1 | 5040 RPM +Inlet Temp | 04h | ok | 7.1 | 25 degrees C +USB Cable Pres | 50h | ok | 7.1 | Connected +Current 1 | 6Ah | ok | 10.1 | 7.20 Amps +Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected +` + + args := os.Args + + // The preceding arguments are test runner flags, which look like: + // /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess -- + cmd, args := args[3], args[4:] + + if cmd == "ipmitool" { + fmt.Fprint(os.Stdout, mockData) + } else { + fmt.Fprint(os.Stdout, "command not found") + os.Exit(1) + } + os.Exit(0) +} diff --git a/plugins/inputs/kafka_consumer/README.md b/plugins/inputs/kafka_consumer/README.md index 24a0efc0f8cb6..b6fc8fc89cfe3 100644 --- a/plugins/inputs/kafka_consumer/README.md +++ b/plugins/inputs/kafka_consumer/README.md @@ -44,7 +44,7 @@ and use the old zookeeper connection method. 
## Maximum length of a message to consume, in bytes (default 0/unlimited); ## larger messages are dropped - max_message_len = 65536 + max_message_len = 1000000 ``` ## Testing diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index d3791b224eb7a..eba9b68ac9bc7 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -21,6 +21,7 @@ type Kafka struct { Topics []string Brokers []string MaxMessageLen int + Version string `toml:"version"` Cluster *cluster.Consumer @@ -64,6 +65,12 @@ var sampleConfig = ` ## Optional Client id # client_id = "Telegraf" + ## Set the minimal supported Kafka version. Setting this enables the use of new + ## Kafka features and APIs. Of particular interest, lz4 compression + ## requires at least version 0.10.0.0. + ## ex: version = "1.1.0" + # version = "" + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" @@ -88,7 +95,7 @@ var sampleConfig = ` ## Maximum length of a message to consume, in bytes (default 0/unlimited); ## larger messages are dropped - max_message_len = 65536 + max_message_len = 1000000 ` func (k *Kafka) SampleConfig() string { @@ -111,6 +118,15 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error { k.acc = acc config := cluster.NewConfig() + + if k.Version != "" { + version, err := sarama.ParseKafkaVersion(k.Version) + if err != nil { + return err + } + config.Version = version + } + config.Consumer.Return.Errors = true tlsConfig, err := k.ClientConfig.TLSConfig() diff --git a/plugins/inputs/logparser/README.md b/plugins/inputs/logparser/README.md index d35a94a709e6f..27cbc3cf44062 100644 --- a/plugins/inputs/logparser/README.md +++ b/plugins/inputs/logparser/README.md @@ -220,6 +220,48 @@ A multi-line literal string allows us to encode the pattern: custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+' ``` +#### Parsing the Telegraf log file +We can use logparser to convert the log lines generated by Telegraf into metrics. + +To do this, we need to configure Telegraf to write its logs to a file. +This can be done using the `agent.logfile` parameter or by configuring syslog. +```toml +[agent] + logfile = "/var/log/telegraf/telegraf.log" +``` + +Logparser configuration: +```toml +[[inputs.logparser]] + files = ["/var/log/telegraf/telegraf.log"] + + [inputs.logparser.grok] + measurement = "telegraf_log" + patterns = ['^%{TIMESTAMP_ISO8601:timestamp:ts-rfc3339} %{TELEGRAF_LOG_LEVEL:level:tag}! %{GREEDYDATA:msg}'] + custom_patterns = ''' +TELEGRAF_LOG_LEVEL (?:[DIWE]+) +''' +``` + +Example log lines: +``` +2018-06-14T06:41:35Z I! Starting Telegraf v1.6.4 +2018-06-14T06:41:35Z I! Agent Config: Interval:3s, Quiet:false, Hostname:"somehostname", Flush Interval:3s +2018-02-20T22:39:20Z E! Error in plugin [inputs.docker]: took longer to collect than collection interval (10s) +2018-06-01T10:34:05Z W! Skipping a scheduled flush because there is already a flush ongoing. +2018-06-14T07:33:33Z D! Output [file] buffer fullness: 0 / 10000 metrics. 
+``` + +Generated metrics: +``` +telegraf_log,host=somehostname,level=I msg="Starting Telegraf v1.6.4" 1528958495000000000 +telegraf_log,host=somehostname,level=I msg="Agent Config: Interval:3s, Quiet:false, Hostname:\"somehostname\", Flush Interval:3s" 1528958495001000000 +telegraf_log,host=somehostname,level=E msg="Error in plugin [inputs.docker]: took longer to collect than collection interval (10s)" 1519166360000000000 +telegraf_log,host=somehostname,level=W msg="Skipping a scheduled flush because there is already a flush ongoing." 1527849245000000000 +telegraf_log,host=somehostname,level=D msg="Output [file] buffer fullness: 0 / 10000 metrics." 1528961613000000000 +``` + + ### Tips for creating patterns Writing complex patterns can be difficult, here is some advice for writing a diff --git a/plugins/inputs/mem/memory.go b/plugins/inputs/mem/memory.go index f664dd3f42669..a7d887cbe8bec 100644 --- a/plugins/inputs/mem/memory.go +++ b/plugins/inputs/mem/memory.go @@ -37,6 +37,27 @@ func (s *MemStats) Gather(acc telegraf.Accumulator) error { "slab": vm.Slab, "used_percent": 100 * float64(vm.Used) / float64(vm.Total), "available_percent": 100 * float64(vm.Available) / float64(vm.Total), + "commit_limit": vm.CommitLimit, + "committed_as": vm.CommittedAS, + "dirty": vm.Dirty, + "high_free": vm.HighFree, + "high_total": vm.HighTotal, + "huge_page_size": vm.HugePageSize, + "huge_pages_free": vm.HugePagesFree, + "huge_pages_total": vm.HugePagesTotal, + "low_free": vm.LowFree, + "low_total": vm.LowTotal, + "mapped": vm.Mapped, + "page_tables": vm.PageTables, + "shared": vm.Shared, + "swap_cached": vm.SwapCached, + "swap_free": vm.SwapFree, + "swap_total": vm.SwapTotal, + "vmalloc_chunk": vm.VMallocChunk, + "vmalloc_total": vm.VMallocTotal, + "vmalloc_used": vm.VMallocUsed, + "write_back": vm.Writeback, + "write_back_tmp": vm.WritebackTmp, } acc.AddGauge("mem", fields, nil) diff --git a/plugins/inputs/mem/memory_test.go b/plugins/inputs/mem/memory_test.go index ef9af8d22c973..06f2f6ea97fd0 100644 --- a/plugins/inputs/mem/memory_test.go +++ b/plugins/inputs/mem/memory_test.go @@ -27,6 +27,27 @@ func TestMemStats(t *testing.T) { // Buffers: 771, // Cached: 4312, // Shared: 2142, + CommitLimit: 1, + CommittedAS: 118680, + Dirty: 4, + HighFree: 0, + HighTotal: 0, + HugePageSize: 4096, + HugePagesFree: 0, + HugePagesTotal: 0, + LowFree: 69936, + LowTotal: 255908, + Mapped: 42236, + PageTables: 1236, + Shared: 0, + SwapCached: 0, + SwapFree: 524280, + SwapTotal: 524280, + VMallocChunk: 3872908, + VMallocTotal: 3874808, + VMallocUsed: 1416, + Writeback: 0, + WritebackTmp: 0, } mps.On("VMStat").Return(vms, nil) @@ -47,6 +68,27 @@ func TestMemStats(t *testing.T) { "inactive": uint64(1124), "wired": uint64(134), "slab": uint64(1234), + "commit_limit": uint64(1), + "committed_as": uint64(118680), + "dirty": uint64(4), + "high_free": uint64(0), + "high_total": uint64(0), + "huge_page_size": uint64(4096), + "huge_pages_free": uint64(0), + "huge_pages_total": uint64(0), + "low_free": uint64(69936), + "low_total": uint64(255908), + "mapped": uint64(42236), + "page_tables": uint64(1236), + "shared": uint64(0), + "swap_cached": uint64(0), + "swap_free": uint64(524280), + "swap_total": uint64(524280), + "vmalloc_chunk": uint64(3872908), + "vmalloc_total": uint64(3874808), + "vmalloc_used": uint64(1416), + "write_back": uint64(0), + "write_back_tmp": uint64(0), } acc.AssertContainsTaggedFields(t, "mem", memfields, make(map[string]string)) diff --git a/plugins/inputs/pgbouncer/README.md 
b/plugins/inputs/pgbouncer/README.md new file mode 100644 index 0000000000000..2a841c45aada0 --- /dev/null +++ b/plugins/inputs/pgbouncer/README.md @@ -0,0 +1,21 @@ +# PgBouncer plugin + +This PgBouncer plugin provides metrics for your PgBouncer connection pooler. + +More information about the meaning of these metrics can be found in the [PgBouncer Documentation](https://pgbouncer.github.io/usage.html). + +## Configuration +Specify address via a URL matching: + + `postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]` + +All connection parameters are optional. + +Without the dbname parameter, the driver will default to a database with the same name as the user. +This dbname is just for instantiating a connection with the server and doesn't restrict the databases we gather metrics from. + +### Configuration example +``` +[[inputs.pgbouncer]] + address = "postgres://telegraf@localhost/pgbouncer" +``` diff --git a/plugins/inputs/pgbouncer/pgbouncer.go b/plugins/inputs/pgbouncer/pgbouncer.go new file mode 100644 index 0000000000000..722648c48edc1 --- /dev/null +++ b/plugins/inputs/pgbouncer/pgbouncer.go @@ -0,0 +1,179 @@ +package pgbouncer + +import ( + "bytes" + "github.com/influxdata/telegraf/plugins/inputs/postgresql" + + // register the pgx driver. + _ "github.com/jackc/pgx/stdlib" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type PgBouncer struct { + postgresql.Service +} + +var ignoredColumns = map[string]bool{"user": true, "database": true, "pool_mode": true, + "avg_req": true, "avg_recv": true, "avg_sent": true, "avg_query": true, +} + +var sampleConfig = ` + ## specify address via a url matching: + ## postgres://[pqgotest[:password]]@localhost[/dbname]\ + ## ?sslmode=[disable|verify-ca|verify-full] + ## or a simple string: + ## host=localhost user=pqotest password=... sslmode=... dbname=app_production + ## + ## All connection parameters are optional.
+ ## + address = "host=localhost user=pgbouncer sslmode=disable" +` + +func (p *PgBouncer) SampleConfig() string { + return sampleConfig +} + +func (p *PgBouncer) Description() string { + return "Read metrics from one or many pgbouncer servers" +} + +func (p *PgBouncer) Gather(acc telegraf.Accumulator) error { + var ( + err error + query string + columns []string + ) + + query = `SHOW STATS` + + rows, err := p.DB.Query(query) + if err != nil { + return err + } + + defer rows.Close() + + // grab the column information from the result + if columns, err = rows.Columns(); err != nil { + return err + } + + for rows.Next() { + tags, columnMap, err := p.accRow(rows, acc, columns) + + if err != nil { + return err + } + + fields := make(map[string]interface{}) + for col, val := range columnMap { + _, ignore := ignoredColumns[col] + if !ignore { + fields[col] = *val + } + } + acc.AddFields("pgbouncer", fields, tags) + } + + query = `SHOW POOLS` + + poolRows, err := p.DB.Query(query) + if err != nil { + return err + } + + defer poolRows.Close() + + // grab the column information from the result + if columns, err = poolRows.Columns(); err != nil { + return err + } + + for poolRows.Next() { + tags, columnMap, err := p.accRow(poolRows, acc, columns) + if err != nil { + return err + } + + if s, ok := (*columnMap["user"]).(string); ok && s != "" { + tags["user"] = s + } + + if s, ok := (*columnMap["pool_mode"]).(string); ok && s != "" { + tags["pool_mode"] = s + } + + fields := make(map[string]interface{}) + for col, val := range columnMap { + _, ignore := ignoredColumns[col] + if !ignore { + fields[col] = *val + } + } + acc.AddFields("pgbouncer_pools", fields, tags) + } + + return poolRows.Err() +} + +type scanner interface { + Scan(dest ...interface{}) error +} + +func (p *PgBouncer) accRow(row scanner, acc telegraf.Accumulator, columns []string) (map[string]string, + map[string]*interface{}, error) { + var columnVars []interface{} + var dbname bytes.Buffer + + // this is where we'll store the column name with its *interface{} + columnMap := make(map[string]*interface{}) + + for _, column := range columns { + columnMap[column] = new(interface{}) + } + + // populate the array of interface{} with the pointers in the right order + for i := 0; i < len(columnMap); i++ { + columnVars = append(columnVars, columnMap[columns[i]]) + } + + // deconstruct array of variables and send to Scan + err := row.Scan(columnVars...) 
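+
+	// A minimal, self-contained sketch of the scan-into-pointers pattern used
+	// above (names are illustrative, not part of this plugin): giving every
+	// column its own *interface{} destination lets SHOW STATS / SHOW POOLS
+	// rows be read without knowing the column set in advance.
+	//
+	//	dest := make(map[string]*interface{}, len(columns))
+	//	vars := make([]interface{}, 0, len(columns))
+	//	for _, c := range columns {
+	//		dest[c] = new(interface{})
+	//		vars = append(vars, dest[c])
+	//	}
+	//	err := rows.Scan(vars...)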
+ + if err != nil { + return nil, nil, err + } + if columnMap["database"] != nil { + // extract the database name from the column map + dbname.WriteString((*columnMap["database"]).(string)) + } else { + dbname.WriteString("postgres") + } + + var tagAddress string + tagAddress, err = p.SanitizedAddress() + if err != nil { + return nil, nil, err + } + + // Return basic tags and the mapped columns + return map[string]string{"server": tagAddress, "db": dbname.String()}, columnMap, nil +} + +func init() { + inputs.Add("pgbouncer", func() telegraf.Input { + return &PgBouncer{ + Service: postgresql.Service{ + MaxIdle: 1, + MaxOpen: 1, + MaxLifetime: internal.Duration{ + Duration: 0, + }, + IsPgBouncer: true, + }, + } + }) +} diff --git a/plugins/inputs/pgbouncer/pgbouncer_test.go b/plugins/inputs/pgbouncer/pgbouncer_test.go new file mode 100644 index 0000000000000..44e28c7f3335e --- /dev/null +++ b/plugins/inputs/pgbouncer/pgbouncer_test.go @@ -0,0 +1,66 @@ +package pgbouncer + +import ( + "fmt" + "github.com/influxdata/telegraf/plugins/inputs/postgresql" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "testing" +) + +func TestPgBouncerGeneratesMetrics(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + p := &PgBouncer{ + Service: postgresql.Service{ + Address: fmt.Sprintf( + "host=%s user=pgbouncer password=pgbouncer dbname=pgbouncer port=6432 sslmode=disable", + testutil.GetLocalHost(), + ), + IsPgBouncer: true, + }, + } + + var acc testutil.Accumulator + require.NoError(t, p.Start(&acc)) + require.NoError(t, p.Gather(&acc)) + + intMetrics := []string{ + "total_requests", + "total_received", + "total_sent", + "total_query_time", + "avg_req", + "avg_recv", + "avg_sent", + "avg_query", + "cl_active", + "cl_waiting", + "sv_active", + "sv_idle", + "sv_used", + "sv_tested", + "sv_login", + "maxwait", + } + + int32Metrics := []string{} + + metricsCounted := 0 + + for _, metric := range intMetrics { + assert.True(t, acc.HasInt64Field("pgbouncer", metric)) + metricsCounted++ + } + + for _, metric := range int32Metrics { + assert.True(t, acc.HasInt32Field("pgbouncer", metric)) + metricsCounted++ + } + + assert.True(t, metricsCounted > 0) + assert.Equal(t, len(intMetrics)+len(int32Metrics), metricsCounted) +} diff --git a/plugins/inputs/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go index 19c9db9ce0f3c..e136098f4f304 100644 --- a/plugins/inputs/postgresql/postgresql.go +++ b/plugins/inputs/postgresql/postgresql.go @@ -189,6 +189,7 @@ func init() { MaxLifetime: internal.Duration{ Duration: 0, }, + IsPgBouncer: false, }, } }) diff --git a/plugins/inputs/postgresql/postgresql_test.go b/plugins/inputs/postgresql/postgresql_test.go index 306dca3b6b6ef..b23321019f5f8 100644 --- a/plugins/inputs/postgresql/postgresql_test.go +++ b/plugins/inputs/postgresql/postgresql_test.go @@ -20,6 +20,7 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) { "host=%s user=postgres sslmode=disable", testutil.GetLocalHost(), ), + IsPgBouncer: false, }, Databases: []string{"postgres"}, } diff --git a/plugins/inputs/postgresql/service.go b/plugins/inputs/postgresql/service.go index 4f7b21e549cf5..9d3ab396317a1 100644 --- a/plugins/inputs/postgresql/service.go +++ b/plugins/inputs/postgresql/service.go @@ -3,6 +3,9 @@ package postgresql import ( "database/sql" "fmt" + "github.com/jackc/pgx" + "github.com/jackc/pgx/pgtype" + "github.com/jackc/pgx/stdlib" "net" "net/url" "regexp" @@ -90,6 
+93,7 @@ type Service struct { MaxOpen int MaxLifetime internal.Duration DB *sql.DB + IsPgBouncer bool } // Start starts the ServiceInput's service, whatever that may be @@ -100,7 +104,34 @@ func (p *Service) Start(telegraf.Accumulator) (err error) { p.Address = localhost } - if p.DB, err = sql.Open("pgx", p.Address); err != nil { + connectionString := p.Address + + // Specific support to make it work with PgBouncer too + // See https://github.com/influxdata/telegraf/issues/3253#issuecomment-357505343 + if p.IsPgBouncer { + d := &stdlib.DriverConfig{ + ConnConfig: pgx.ConnConfig{ + PreferSimpleProtocol: true, + RuntimeParams: map[string]string{ + "client_encoding": "UTF8", + }, + CustomConnInfo: func(c *pgx.Conn) (*pgtype.ConnInfo, error) { + info := c.ConnInfo.DeepCopy() + info.RegisterDataType(pgtype.DataType{ + Value: &pgtype.OIDValue{}, + Name: "int8OID", + OID: pgtype.Int8OID, + }) + + return info, nil + }, + }, + } + stdlib.RegisterDriverConfig(d) + connectionString = d.ConnectionString(p.Address) + } + + if p.DB, err = sql.Open("pgx", connectionString); err != nil { return err } diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go index 056f4afc841df..a04382888b41b 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -283,6 +283,7 @@ func init() { MaxLifetime: internal.Duration{ Duration: 0, }, + IsPgBouncer: false, }, } }) diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go index 77db5feb542fc..0f9358da63ac1 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go @@ -17,6 +17,7 @@ func queryRunner(t *testing.T, q query) *testutil.Accumulator { "host=%s user=postgres sslmode=disable", testutil.GetLocalHost(), ), + IsPgBouncer: false, }, Databases: []string{"postgres"}, Query: q, diff --git a/plugins/inputs/powerdns/powerdns_test.go b/plugins/inputs/powerdns/powerdns_test.go index 56666a886eecf..fe64be5db62eb 100644 --- a/plugins/inputs/powerdns/powerdns_test.go +++ b/plugins/inputs/powerdns/powerdns_test.go @@ -1,8 +1,6 @@ package powerdns import ( - "crypto/rand" - "encoding/binary" "fmt" "net" "testing" @@ -70,10 +68,9 @@ func (s statServer) serverSocket(l net.Listener) { } } -func TestMemcachedGeneratesMetrics(t *testing.T) { +func TestPowerdnsGeneratesMetrics(t *testing.T) { // We create a fake server to return test data - var randomNumber int64 - binary.Read(rand.Reader, binary.LittleEndian, &randomNumber) + randomNumber := int64(5239846799706671610) socket, err := net.Listen("unix", fmt.Sprintf("/tmp/pdns%d.controlsocket", randomNumber)) if err != nil { t.Fatal("Cannot initialize server on port ") diff --git a/plugins/inputs/syslog/README.md b/plugins/inputs/syslog/README.md index 107727947361e..e57d28dd2d036 100644 --- a/plugins/inputs/syslog/README.md +++ b/plugins/inputs/syslog/README.md @@ -33,9 +33,9 @@ Syslog messages should be formatted according to ## Only applies to stream sockets (e.g. TCP). # max_connections = 1024 - ## Read timeout (default = 500ms). + ## Read timeout is the maximum time allowed for reading a single message (default = 5s). ## 0 means unlimited. - # read_timeout = 500ms + # read_timeout = "5s" ## Whether to parse in best effort mode or not (default = false). 
## By default best effort parsing is off. diff --git a/plugins/inputs/syslog/rfc5426_test.go b/plugins/inputs/syslog/rfc5426_test.go index 8304a54060982..67966ed1de406 100644 --- a/plugins/inputs/syslog/rfc5426_test.go +++ b/plugins/inputs/syslog/rfc5426_test.go @@ -202,6 +202,38 @@ func getTestCasesForRFC5426() []testCase5426 { }, werr: true, }, + { + name: "trim message", + data: []byte("<1>1 - - - - - - \tA\n"), + wantBestEffort: &testutil.Metric{ + Measurement: "syslog", + Fields: map[string]interface{}{ + "version": uint16(1), + "message": "\tA", + "facility_code": 0, + "severity_code": 1, + }, + Tags: map[string]string{ + "severity": "alert", + "facility": "kern", + }, + Time: defaultTime, + }, + wantStrict: &testutil.Metric{ + Measurement: "syslog", + Fields: map[string]interface{}{ + "version": uint16(1), + "message": "\tA", + "facility_code": 0, + "severity_code": 1, + }, + Tags: map[string]string{ + "severity": "alert", + "facility": "kern", + }, + Time: defaultTime, + }, + }, } return testCases diff --git a/plugins/inputs/syslog/syslog.go b/plugins/inputs/syslog/syslog.go index 6f8d959ec789f..5b22cbcadaabe 100644 --- a/plugins/inputs/syslog/syslog.go +++ b/plugins/inputs/syslog/syslog.go @@ -10,6 +10,7 @@ import ( "strings" "sync" "time" + "unicode" "github.com/influxdata/go-syslog/rfc5424" "github.com/influxdata/go-syslog/rfc5425" @@ -71,9 +72,9 @@ var sampleConfig = ` ## Only applies to stream sockets (e.g. TCP). # max_connections = 1024 - ## Read timeout (default = 500ms). + ## Read timeout is the maximum time allowed for reading a single message (default = 5s). ## 0 means unlimited. - # read_timeout = 500ms + # read_timeout = "5s" ## Whether to parse in best effort mode or not (default = false). ## By default best effort parsing is off. @@ -365,7 +366,9 @@ func fields(msg rfc5424.SyslogMessage, s *Syslog) map[string]interface{} { } if msg.Message() != nil { - flds["message"] = strings.TrimSpace(*msg.Message()) + flds["message"] = strings.TrimRightFunc(*msg.Message(), func(r rune) bool { + return unicode.IsSpace(r) + }) } if msg.StructuredData() != nil { diff --git a/plugins/inputs/zfs/zfs_freebsd.go b/plugins/inputs/zfs/zfs_freebsd.go index 63bbdd6e6e614..51c20682e832b 100644 --- a/plugins/inputs/zfs/zfs_freebsd.go +++ b/plugins/inputs/zfs/zfs_freebsd.go @@ -30,7 +30,11 @@ func (z *Zfs) gatherPoolStats(acc telegraf.Accumulator) (string, error) { if z.PoolMetrics { for _, line := range lines { col := strings.Split(line, "\t") - tags := map[string]string{"pool": col[0], "health": col[8]} + if len(col) != 8 { + continue + } + + tags := map[string]string{"pool": col[0], "health": col[1]} fields := map[string]interface{}{} if tags["health"] == "UNAVAIL" { @@ -39,19 +43,19 @@ func (z *Zfs) gatherPoolStats(acc telegraf.Accumulator) (string, error) { } else { - size, err := strconv.ParseInt(col[1], 10, 64) + size, err := strconv.ParseInt(col[2], 10, 64) if err != nil { return "", fmt.Errorf("Error parsing size: %s", err) } fields["size"] = size - alloc, err := strconv.ParseInt(col[2], 10, 64) + alloc, err := strconv.ParseInt(col[3], 10, 64) if err != nil { return "", fmt.Errorf("Error parsing allocation: %s", err) } fields["allocated"] = alloc - free, err := strconv.ParseInt(col[3], 10, 64) + free, err := strconv.ParseInt(col[4], 10, 64) if err != nil { return "", fmt.Errorf("Error parsing free: %s", err) } @@ -130,7 +134,7 @@ func run(command string, args ...string) ([]string, error) { } func zpool() ([]string, error) { - return run("zpool", []string{"list", "-Hp"}...) 
+ return run("zpool", []string{"list", "-Hp", "-o", "name,health,size,alloc,free,fragmentation,capacity,dedupratio"}...) } func sysctl(metric string) ([]string, error) { diff --git a/plugins/inputs/zfs/zfs_freebsd_test.go b/plugins/inputs/zfs/zfs_freebsd_test.go index 60b95a39d6f42..dba135cfd0bc8 100644 --- a/plugins/inputs/zfs/zfs_freebsd_test.go +++ b/plugins/inputs/zfs/zfs_freebsd_test.go @@ -10,21 +10,21 @@ import ( "github.com/stretchr/testify/require" ) -// $ zpool list -Hp +// $ zpool list -Hp -o name,health,size,alloc,free,fragmentation,capacity,dedupratio var zpool_output = []string{ - "freenas-boot 30601641984 2022177280 28579464704 - - 6 1.00x ONLINE -", - "red1 8933531975680 1126164848640 7807367127040 - 8% 12 1.83x ONLINE /mnt", - "temp1 2989297238016 1626309320704 1362987917312 - 38% 54 1.28x ONLINE /mnt", - "temp2 2989297238016 626958278656 2362338959360 - 12% 20 1.00x ONLINE /mnt", + "freenas-boot ONLINE 30601641984 2022177280 28579464704 - 6 1.00x", + "red1 ONLINE 8933531975680 1126164848640 7807367127040 8% 12 1.83x", + "temp1 ONLINE 2989297238016 1626309320704 1362987917312 38% 54 1.28x", + "temp2 ONLINE 2989297238016 626958278656 2362338959360 12% 20 1.00x", } func mock_zpool() ([]string, error) { return zpool_output, nil } -// $ zpool list -Hp +// $ zpool list -Hp -o name,health,size,alloc,free,fragmentation,capacity,dedupratio var zpool_output_unavail = []string{ - "temp2 - - - - - - - UNAVAIL -", + "temp2 UNAVAIL - - - - - -", } func mock_zpool_unavail() ([]string, error) { diff --git a/plugins/outputs/cloudwatch/README.md b/plugins/outputs/cloudwatch/README.md index c44ac4ead97f3..31619263f26f9 100644 --- a/plugins/outputs/cloudwatch/README.md +++ b/plugins/outputs/cloudwatch/README.md @@ -36,3 +36,13 @@ Examples include but are not limited to: ### namespace The namespace used for AWS CloudWatch metrics. + +### write_statistics + +If you have a large amount of metrics, you should consider to send statistic +values instead of raw metrics which could not only improve performance but +also save AWS API cost. If enable this flag, this plugin would parse the required +[CloudWatch statistic fields](https://docs.aws.amazon.com/sdk-for-go/api/service/cloudwatch/#StatisticSet) +(count, min, max, and sum) and send them to CloudWatch. You could use `basicstats` +aggregator to calculate those fields. If not all statistic fields are available, +all fields would still be sent as raw metrics. 
\ No newline at end of file diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go index f7ccc1feea3f1..d3bd663036b71 100644 --- a/plugins/outputs/cloudwatch/cloudwatch.go +++ b/plugins/outputs/cloudwatch/cloudwatch.go @@ -17,16 +17,139 @@ import ( ) type CloudWatch struct { - Region string `toml:"region"` - AccessKey string `toml:"access_key"` - SecretKey string `toml:"secret_key"` - RoleARN string `toml:"role_arn"` - Profile string `toml:"profile"` - Filename string `toml:"shared_credential_file"` - Token string `toml:"token"` + Region string `toml:"region"` + AccessKey string `toml:"access_key"` + SecretKey string `toml:"secret_key"` + RoleARN string `toml:"role_arn"` + Profile string `toml:"profile"` + Filename string `toml:"shared_credential_file"` + Token string `toml:"token"` + EndpointURL string `toml:"endpoint_url"` Namespace string `toml:"namespace"` // CloudWatch Metrics Namespace svc *cloudwatch.CloudWatch + + WriteStatistics bool `toml:"write_statistics"` +} + +type statisticType int + +const ( + statisticTypeNone statisticType = iota + statisticTypeMax + statisticTypeMin + statisticTypeSum + statisticTypeCount +) + +type cloudwatchField interface { + addValue(sType statisticType, value float64) + buildDatum() []*cloudwatch.MetricDatum +} + +type statisticField struct { + metricName string + fieldName string + tags map[string]string + values map[statisticType]float64 + timestamp time.Time +} + +func (f *statisticField) addValue(sType statisticType, value float64) { + if sType != statisticTypeNone { + f.values[sType] = value + } +} + +func (f *statisticField) buildDatum() []*cloudwatch.MetricDatum { + + var datums []*cloudwatch.MetricDatum + + if f.hasAllFields() { + // If we have all required fields, we build datum with StatisticValues + min, _ := f.values[statisticTypeMin] + max, _ := f.values[statisticTypeMax] + sum, _ := f.values[statisticTypeSum] + count, _ := f.values[statisticTypeCount] + + datum := &cloudwatch.MetricDatum{ + MetricName: aws.String(strings.Join([]string{f.metricName, f.fieldName}, "_")), + Dimensions: BuildDimensions(f.tags), + Timestamp: aws.Time(f.timestamp), + StatisticValues: &cloudwatch.StatisticSet{ + Minimum: aws.Float64(min), + Maximum: aws.Float64(max), + Sum: aws.Float64(sum), + SampleCount: aws.Float64(count), + }, + } + + datums = append(datums, datum) + + } else { + // If we don't have all required fields, we build each field as independent datum + for sType, value := range f.values { + datum := &cloudwatch.MetricDatum{ + Value: aws.Float64(value), + Dimensions: BuildDimensions(f.tags), + Timestamp: aws.Time(f.timestamp), + } + + switch sType { + case statisticTypeMin: + datum.MetricName = aws.String(strings.Join([]string{f.metricName, f.fieldName, "min"}, "_")) + case statisticTypeMax: + datum.MetricName = aws.String(strings.Join([]string{f.metricName, f.fieldName, "max"}, "_")) + case statisticTypeSum: + datum.MetricName = aws.String(strings.Join([]string{f.metricName, f.fieldName, "sum"}, "_")) + case statisticTypeCount: + datum.MetricName = aws.String(strings.Join([]string{f.metricName, f.fieldName, "count"}, "_")) + default: + // should not be here + continue + } + + datums = append(datums, datum) + } + } + + return datums +} + +func (f *statisticField) hasAllFields() bool { + + _, hasMin := f.values[statisticTypeMin] + _, hasMax := f.values[statisticTypeMax] + _, hasSum := f.values[statisticTypeSum] + _, hasCount := f.values[statisticTypeCount] + + return hasMin && hasMax && hasSum && hasCount 
+} + +type valueField struct { + metricName string + fieldName string + tags map[string]string + value float64 + timestamp time.Time +} + +func (f *valueField) addValue(sType statisticType, value float64) { + if sType == statisticTypeNone { + f.value = value + } +} + +func (f *valueField) buildDatum() []*cloudwatch.MetricDatum { + + return []*cloudwatch.MetricDatum{ + { + MetricName: aws.String(strings.Join([]string{f.metricName, f.fieldName}, "_")), + Value: aws.Float64(f.value), + Dimensions: BuildDimensions(f.tags), + Timestamp: aws.Time(f.timestamp), + }, + } } var sampleConfig = ` @@ -48,8 +171,22 @@ var sampleConfig = ` #profile = "" #shared_credential_file = "" + ## Endpoint to make request against, the correct endpoint is automatically + ## determined and this option should only be set if you wish to override the + ## default. + ## ex: endpoint_url = "http://localhost:8000" + # endpoint_url = "" + ## Namespace for the CloudWatch MetricDatums namespace = "InfluxData/Telegraf" + + ## If you have a large amount of metrics, you should consider sending statistic + ## values instead of raw metrics, which can not only improve performance but + ## also save AWS API cost. If this flag is enabled, this plugin parses the required + ## CloudWatch statistic fields (count, min, max, and sum) and sends them to CloudWatch. + ## You can use the basicstats aggregator to calculate those fields. If not all statistic + ## fields are available, all fields are still sent as raw metrics. + # write_statistics = false ` func (c *CloudWatch) SampleConfig() string { @@ -62,13 +199,14 @@ func (c *CloudWatch) Description() string { func (c *CloudWatch) Connect() error { credentialConfig := &internalaws.CredentialConfig{ - Region: c.Region, - AccessKey: c.AccessKey, - SecretKey: c.SecretKey, - RoleARN: c.RoleARN, - Profile: c.Profile, - Filename: c.Filename, - Token: c.Token, + Region: c.Region, + AccessKey: c.AccessKey, + SecretKey: c.SecretKey, + RoleARN: c.RoleARN, + Profile: c.Profile, + Filename: c.Filename, + Token: c.Token, + EndpointURL: c.EndpointURL, } configProvider := credentialConfig.Credentials() @@ -96,7 +234,7 @@ func (c *CloudWatch) Write(metrics []telegraf.Metric) error { var datums []*cloudwatch.MetricDatum for _, m := range metrics { - d := BuildMetricDatum(m) + d := BuildMetricDatum(c.WriteStatistics, m) datums = append(datums, d...) } @@ -151,67 +289,58 @@ func PartitionDatums(size int, datums []*cloudwatch.MetricDatum) [][]*cloudwatch return partitions } -// Make a MetricDatum for each field in a Point. Only fields with values that can be -// converted to float64 are supported. Non-supported fields are skipped. -func BuildMetricDatum(point telegraf.Metric) []*cloudwatch.MetricDatum { - datums := make([]*cloudwatch.MetricDatum, len(point.Fields())) - i := 0 +// Make a MetricDatum from a telegraf.Metric. It checks whether all required fields of +// cloudwatch.StatisticSet are available. If so, it builds the MetricDatum from statistic values. +// Otherwise, each field is built as an independent datum.
+func BuildMetricDatum(buildStatistic bool, point telegraf.Metric) []*cloudwatch.MetricDatum { - var value float64 + fields := make(map[string]cloudwatchField) for k, v := range point.Fields() { - switch t := v.(type) { - case int: - value = float64(t) - case int32: - value = float64(t) - case int64: - value = float64(t) - case uint64: - value = float64(t) - case float64: - value = t - case bool: - if t { - value = 1 - } else { - value = 0 - } - case time.Time: - value = float64(t.Unix()) - default: - // Skip unsupported type. - datums = datums[:len(datums)-1] - continue - } - // Do CloudWatch boundary checking - // Constraints at: http://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html - if math.IsNaN(value) { - datums = datums[:len(datums)-1] - continue - } - if math.IsInf(value, 0) { - datums = datums[:len(datums)-1] - continue - } - if value > 0 && value < float64(8.515920e-109) { - datums = datums[:len(datums)-1] + val, ok := convert(v) + if !ok { + // Only fields with values that can be converted to float64 (and within CloudWatch boundary) are supported. + // Non-supported fields are skipped. continue } - if value > float64(1.174271e+108) { - datums = datums[:len(datums)-1] + + sType, fieldName := getStatisticType(k) + + // If statistic mode is not enabled, or this is not a statistic field, treat the current field as a plain value field. + if !buildStatistic || sType == statisticTypeNone { + fields[k] = &valueField{ + metricName: point.Name(), + fieldName: k, + tags: point.Tags(), + timestamp: point.Time(), + value: val, + } continue } - datums[i] = &cloudwatch.MetricDatum{ - MetricName: aws.String(strings.Join([]string{point.Name(), k}, "_")), - Value: aws.Float64(value), - Dimensions: BuildDimensions(point.Tags()), - Timestamp: aws.Time(point.Time()), + // Otherwise, it is a statistic field. + if _, ok := fields[fieldName]; !ok { + // First time seeing this field; create its statisticField + fields[fieldName] = &statisticField{ + metricName: point.Name(), + fieldName: fieldName, + tags: point.Tags(), + timestamp: point.Time(), + values: map[statisticType]float64{ + sType: val, + }, + } + } else { + // Add new statistic value to this field + fields[fieldName].addValue(sType, val) } + } - i += 1 + var datums []*cloudwatch.MetricDatum + for _, f := range fields { + d := f.buildDatum() + datums = append(datums, d...) } return datums @@ -260,6 +389,72 @@ func BuildDimensions(mTags map[string]string) []*cloudwatch.Dimension { return dimensions } +func getStatisticType(name string) (sType statisticType, fieldName string) { + switch { + case strings.HasSuffix(name, "_max"): + sType = statisticTypeMax + fieldName = strings.TrimSuffix(name, "_max") + case strings.HasSuffix(name, "_min"): + sType = statisticTypeMin + fieldName = strings.TrimSuffix(name, "_min") + case strings.HasSuffix(name, "_sum"): + sType = statisticTypeSum + fieldName = strings.TrimSuffix(name, "_sum") + case strings.HasSuffix(name, "_count"): + sType = statisticTypeCount + fieldName = strings.TrimSuffix(name, "_count") + default: + sType = statisticTypeNone + fieldName = name + } + return +} + +func convert(v interface{}) (value float64, ok bool) { + + ok = true + + switch t := v.(type) { + case int: + value = float64(t) + case int32: + value = float64(t) + case int64: + value = float64(t) + case uint64: + value = float64(t) + case float64: + value = t + case bool: + if t { + value = 1 + } else { + value = 0 + } + case time.Time: + value = float64(t.Unix()) + default: + // Skip unsupported type.
+ ok = false + return + } + + // Do CloudWatch boundary checking + // Constraints at: http://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html + switch { + case math.IsNaN(value): + return 0, false + case math.IsInf(value, 0): + return 0, false + case value > 0 && value < float64(8.515920e-109): + return 0, false + case value > float64(1.174271e+108): + return 0, false + } + + return +} + func init() { outputs.Add("cloudwatch", func() telegraf.Output { return &CloudWatch{} diff --git a/plugins/outputs/cloudwatch/cloudwatch_test.go b/plugins/outputs/cloudwatch/cloudwatch_test.go index 8ab60de2f082b..c91c30e0c0b21 100644 --- a/plugins/outputs/cloudwatch/cloudwatch_test.go +++ b/plugins/outputs/cloudwatch/cloudwatch_test.go @@ -5,11 +5,13 @@ import ( "math" "sort" "testing" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudwatch" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" @@ -72,13 +74,45 @@ func TestBuildMetricDatums(t *testing.T) { testutil.TestMetric(float64(1.174272e+108)), // largest should be 1.174271e+108 } for _, point := range validMetrics { - datums := BuildMetricDatum(point) + datums := BuildMetricDatum(false, point) assert.Equal(1, len(datums), fmt.Sprintf("Valid point should create a Datum {value: %v}", point)) } for _, point := range invalidMetrics { - datums := BuildMetricDatum(point) + datums := BuildMetricDatum(false, point) assert.Equal(0, len(datums), fmt.Sprintf("Invalid point should not create a Datum {value: %v}", point)) } + + statisticMetric, _ := metric.New( + "test1", + map[string]string{"tag1": "value1"}, + map[string]interface{}{"value_max": float64(10), "value_min": float64(0), "value_sum": float64(100), "value_count": float64(20)}, + time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + datums := BuildMetricDatum(true, statisticMetric) + assert.Equal(1, len(datums), fmt.Sprintf("Valid point should create a Datum {value: %v}", statisticMetric)) + + multiFieldsMetric, _ := metric.New( + "test1", + map[string]string{"tag1": "value1"}, + map[string]interface{}{"valueA": float64(10), "valueB": float64(0), "valueC": float64(100), "valueD": float64(20)}, + time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + datums = BuildMetricDatum(true, multiFieldsMetric) + assert.Equal(4, len(datums), fmt.Sprintf("Each field should create a Datum {value: %v}", multiFieldsMetric)) + + multiStatisticMetric, _ := metric.New( + "test1", + map[string]string{"tag1": "value1"}, + map[string]interface{}{ + "valueA_max": float64(10), "valueA_min": float64(0), "valueA_sum": float64(100), "valueA_count": float64(20), + "valueB_max": float64(10), "valueB_min": float64(0), "valueB_sum": float64(100), "valueB_count": float64(20), + "valueC_max": float64(10), "valueC_min": float64(0), "valueC_sum": float64(100), + "valueD": float64(10), "valueE": float64(0), + }, + time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + datums = BuildMetricDatum(true, multiStatisticMetric) + assert.Equal(7, len(datums), fmt.Sprintf("Valid point should create a Datum {value: %v}", multiStatisticMetric)) } func TestPartitionDatums(t *testing.T) { diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index f80722bc318f3..bd53d4ed4a941 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -141,36 +141,36 @@ func (i *InfluxDB) Connect() error { }
for _, u := range urls { - u, err := url.Parse(u) + parts, err := url.Parse(u) if err != nil { - return fmt.Errorf("error parsing url [%s]: %v", u, err) + return fmt.Errorf("error parsing url [%q]: %v", u, err) } var proxy *url.URL if len(i.HTTPProxy) > 0 { proxy, err = url.Parse(i.HTTPProxy) if err != nil { - return fmt.Errorf("error parsing proxy_url [%s]: %v", proxy, err) + return fmt.Errorf("error parsing proxy_url [%s]: %v", i.HTTPProxy, err) } } - switch u.Scheme { + switch parts.Scheme { case "udp", "udp4", "udp6": - c, err := i.udpClient(u) + c, err := i.udpClient(parts) if err != nil { return err } i.clients = append(i.clients, c) case "http", "https", "unix": - c, err := i.httpClient(ctx, u, proxy) + c, err := i.httpClient(ctx, parts, proxy) if err != nil { return err } i.clients = append(i.clients, c) default: - return fmt.Errorf("unsupported scheme [%s]: %q", u, u.Scheme) + return fmt.Errorf("unsupported scheme [%q]: %q", u, parts.Scheme) } } diff --git a/plugins/outputs/influxdb/udp.go b/plugins/outputs/influxdb/udp.go index 5b3f5ce51a515..62f2a6ab72371 100644 --- a/plugins/outputs/influxdb/udp.go +++ b/plugins/outputs/influxdb/udp.go @@ -3,11 +3,11 @@ package influxdb import ( "context" "fmt" + "log" "net" "net/url" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/plugins/serializers/influx" ) @@ -28,7 +28,7 @@ type Conn interface { type UDPConfig struct { MaxPayloadSize int URL *url.URL - Serializer serializers.Serializer + Serializer *influx.Serializer Dialer Dialer } @@ -65,7 +65,7 @@ func NewUDPClient(config *UDPConfig) (*udpClient, error) { type udpClient struct { conn Conn dialer Dialer - serializer serializers.Serializer + serializer *influx.Serializer url *url.URL } @@ -89,7 +89,11 @@ func (c *udpClient) Write(ctx context.Context, metrics []telegraf.Metric) error for _, metric := range metrics { octets, err := c.serializer.Serialize(metric) if err != nil { - return fmt.Errorf("could not serialize metric: %v", err) + // Since we are serializing multiple metrics, don't fail the + // entire batch just because of one unserializable metric. + log.Printf("E! 
[outputs.influxdb] when writing to [%s] could not serialize metric: %v", + c.URL(), err) + continue } _, err = c.conn.Write(octets) diff --git a/plugins/outputs/influxdb/udp_test.go b/plugins/outputs/influxdb/udp_test.go index 9bced42621de1..61b3f1dedda07 100644 --- a/plugins/outputs/influxdb/udp_test.go +++ b/plugins/outputs/influxdb/udp_test.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "log" "net" "net/url" "sync" @@ -13,7 +14,6 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/outputs/influxdb" - "github.com/influxdata/telegraf/plugins/serializers/influx" "github.com/stretchr/testify/require" ) @@ -65,19 +65,6 @@ func (d *MockDialer) DialContext(ctx context.Context, network string, address st return d.DialContextF(network, address) } -type MockSerializer struct { - SerializeF func(metric telegraf.Metric) ([]byte, error) - SerializeBatchF func(metrics []telegraf.Metric) ([]byte, error) -} - -func (s *MockSerializer) Serialize(metric telegraf.Metric) ([]byte, error) { - return s.SerializeF(metric) -} - -func (s *MockSerializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { - return s.SerializeBatchF(metrics) -} - func TestUDP_NewUDPClientNoURL(t *testing.T) { config := &influxdb.UDPConfig{} _, err := influxdb.NewUDPClient(config) @@ -177,28 +164,69 @@ func TestUDP_WriteError(t *testing.T) { require.True(t, closed) } -func TestUDP_SerializeError(t *testing.T) { - config := &influxdb.UDPConfig{ - URL: getURL(), - Dialer: &MockDialer{ - DialContextF: func(network, address string) (influxdb.Conn, error) { - conn := &MockConn{} - return conn, nil +func TestUDP_ErrorLogging(t *testing.T) { + tests := []struct { + name string + config *influxdb.UDPConfig + metrics []telegraf.Metric + logContains string + }{ + { + name: "logs need more space", + config: &influxdb.UDPConfig{ + MaxPayloadSize: 1, + URL: getURL(), + Dialer: &MockDialer{ + DialContextF: func(network, address string) (influxdb.Conn, error) { + conn := &MockConn{} + return conn, nil + }, + }, }, + metrics: []telegraf.Metric{getMetric()}, + logContains: `could not serialize metric: "cpu": need more space`, }, - Serializer: &MockSerializer{ - SerializeF: func(metric telegraf.Metric) ([]byte, error) { - return nil, influx.ErrNeedMoreSpace + { + name: "logs series name", + config: &influxdb.UDPConfig{ + URL: getURL(), + Dialer: &MockDialer{ + DialContextF: func(network, address string) (influxdb.Conn, error) { + conn := &MockConn{} + return conn, nil + }, + }, + }, + metrics: []telegraf.Metric{ + func() telegraf.Metric { + metric, _ := metric.New( + "cpu", + map[string]string{ + "host": "example.org", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ) + return metric + }(), }, + logContains: `could not serialize metric: "cpu,host=example.org": no serializable fields`, }, } - client, err := influxdb.NewUDPClient(config) - require.NoError(t, err) - - ctx := context.Background() - err = client.Write(ctx, []telegraf.Metric{getMetric()}) - require.Error(t, err) - require.Contains(t, err.Error(), influx.ErrNeedMoreSpace.Error()) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var b bytes.Buffer + log.SetOutput(&b) + + client, err := influxdb.NewUDPClient(tt.config) + require.NoError(t, err) + + ctx := context.Background() + err = client.Write(ctx, tt.metrics) + require.NoError(t, err) + require.Contains(t, b.String(), tt.logContains) + }) + } } func TestUDP_WriteWithRealConn(t *testing.T) { diff --git 
a/plugins/outputs/kafka/README.md b/plugins/outputs/kafka/README.md index 562f3fd5d9b74..25b173a0260f7 100644 --- a/plugins/outputs/kafka/README.md +++ b/plugins/outputs/kafka/README.md @@ -10,9 +10,15 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm ## Kafka topic for producer messages topic = "telegraf" - ## Optional client id + ## Optional Client id # client_id = "Telegraf" + ## Set the minimal supported Kafka version. Setting this enables the use of new + ## Kafka features and APIs. Of particular interest, lz4 compression + ## requires at least version 0.10.0.0. + ## ex: version = "1.1.0" + # version = "" + ## Optional topic suffix configuration. ## If the section is omitted, no suffix is used. ## Following topic suffix methods are supported: @@ -20,7 +26,7 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm ## tags - suffix equals to separator + specified tags' values ## interleaved with separator - ## Suffix equals to "_" + measurement's name + ## Suffix equals to "_" + measurement name # [outputs.kafka.topic_suffix] # method = "measurement" # separator = "_" @@ -44,11 +50,19 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm ## ie, if this tag exists, its value will be used as the routing key routing_tag = "host" + ## Static routing key. Used when no routing_tag is set or as a fallback + ## when the tag specified in routing tag is not found. If set to "random", + ## a random value will be generated for each message. + ## ex: routing_key = "random" + ## routing_key = "telegraf" + # routing_key = "" + ## CompressionCodec represents the various compression codecs recognized by ## Kafka in messages. ## 0 : No compression ## 1 : Gzip compression ## 2 : Snappy compression + ## 3 : LZ4 compression # compression_codec = 0 ## RequiredAcks is used in Produce Requests to tell the broker how many diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index a45e2a4e9c3cf..f2951e6d5eab8 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -3,12 +3,14 @@ package kafka import ( "crypto/tls" "fmt" + "log" "strings" "github.com/influxdata/telegraf" tlsint "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" + uuid "github.com/satori/go.uuid" "github.com/Shopify/sarama" ) @@ -21,22 +23,18 @@ var ValidTopicSuffixMethods = []string{ type ( Kafka struct { - // Kafka brokers to send metrics to - Brokers []string - // Kafka topic - Topic string - // Kafka client id - ClientID string `toml:"client_id"` - // Kafka topic suffix option - TopicSuffix TopicSuffix `toml:"topic_suffix"` - // Routing Key Tag - RoutingTag string `toml:"routing_tag"` - // Compression Codec Tag + Brokers []string + Topic string + ClientID string `toml:"client_id"` + TopicSuffix TopicSuffix `toml:"topic_suffix"` + RoutingTag string `toml:"routing_tag"` + RoutingKey string `toml:"routing_key"` CompressionCodec int - // RequiredAcks Tag - RequiredAcks int - // MaxRetry Tag - MaxRetry int + RequiredAcks int + MaxRetry int + MaxMessageBytes int `toml:"max_message_bytes"` + + Version string `toml:"version"` // Legacy TLS config options // TLS client certificate @@ -74,6 +72,12 @@ var sampleConfig = ` ## Optional Client id # client_id = "Telegraf" + ## Set the minimal supported Kafka version. Setting this enables the use of new + ## Kafka features and APIs.
Of particular interest, lz4 compression + ## requires at least version 0.10.0.0. + ## ex: version = "1.1.0" + # version = "" + ## Optional topic suffix configuration. ## If the section is omitted, no suffix is used. ## Following topic suffix methods are supported: @@ -105,11 +109,19 @@ var sampleConfig = ` ## ie, if this tag exists, its value will be used as the routing key routing_tag = "host" + ## Static routing key. Used when no routing_tag is set or as a fallback + ## when the tag specified in routing tag is not found. If set to "random", + ## a random value will be generated for each message. + ## ex: routing_key = "random" + ## routing_key = "telegraf" + # routing_key = "" + ## CompressionCodec represents the various compression codecs recognized by ## Kafka in messages. ## 0 : No compression ## 1 : Gzip compression ## 2 : Snappy compression + ## 3 : LZ4 compression # compression_codec = 0 ## RequiredAcks is used in Produce Requests to tell the broker how many @@ -132,6 +144,10 @@ var sampleConfig = ` ## until the next flush. # max_retry = 3 + ## The maximum permitted size of a message. Should be set equal to or + ## smaller than the broker's 'message.max.bytes'. + # max_message_bytes = 1000000 + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -191,6 +207,14 @@ func (k *Kafka) Connect() error { } config := sarama.NewConfig() + if k.Version != "" { + version, err := sarama.ParseKafkaVersion(k.Version) + if err != nil { + return err + } + config.Version = version + } + if k.ClientID != "" { config.ClientID = k.ClientID } else { @@ -202,6 +226,10 @@ func (k *Kafka) Connect() error { config.Producer.Retry.Max = k.MaxRetry config.Producer.Return.Successes = true + if k.MaxMessageBytes > 0 { + config.Producer.MaxMessageBytes = k.MaxMessageBytes + } + // Legacy support ssl config if k.Certificate != "" { k.TLSCert = k.Certificate @@ -245,33 +273,56 @@ func (k *Kafka) Description() string { return "Configuration for the Kafka server to send metrics to" } -func (k *Kafka) Write(metrics []telegraf.Metric) error { - if len(metrics) == 0 { - return nil +func (k *Kafka) routingKey(metric telegraf.Metric) string { + if k.RoutingTag != "" { + key, ok := metric.GetTag(k.RoutingTag) + if ok { + return key + } } + if k.RoutingKey == "random" { + u := uuid.NewV4() + return u.String() + } + + return k.RoutingKey +} + +func (k *Kafka) Write(metrics []telegraf.Metric) error { + msgs := make([]*sarama.ProducerMessage, 0, len(metrics)) for _, metric := range metrics { buf, err := k.serializer.Serialize(metric) if err != nil { return err } - topicName := k.GetTopicName(metric) - m := &sarama.ProducerMessage{ - Topic: topicName, + Topic: k.GetTopicName(metric), Value: sarama.ByteEncoder(buf), } - if h, ok := metric.Tags()[k.RoutingTag]; ok { - m.Key = sarama.StringEncoder(h) + key := k.routingKey(metric) + if key != "" { + m.Key = sarama.StringEncoder(key) } + msgs = append(msgs, m) + } - _, _, err = k.producer.SendMessage(m) - - if err != nil { - return fmt.Errorf("FAILED to send kafka message: %s\n", err) + err := k.producer.SendMessages(msgs) + if err != nil { + // We could have many errors, return only the first encountered. + if errs, ok := err.(sarama.ProducerErrors); ok { + for _, prodErr := range errs { + if prodErr.Err == sarama.ErrMessageSizeTooLarge { + log.Printf("E! 
Error writing to output [kafka]: Message too large, consider increasing `max_message_bytes`; dropping batch") + return nil + } + return prodErr + } } + return err } + return nil } diff --git a/plugins/outputs/kafka/kafka_test.go b/plugins/outputs/kafka/kafka_test.go index b18d9f15d9d4a..ba900e32c6eaa 100644 --- a/plugins/outputs/kafka/kafka_test.go +++ b/plugins/outputs/kafka/kafka_test.go @@ -2,7 +2,10 @@ package kafka import ( "testing" + "time" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" @@ -96,3 +99,59 @@ func TestValidateTopicSuffixMethod(t *testing.T) { require.NoError(t, err, "Topic suffix method used should be valid.") } } + +func TestRoutingKey(t *testing.T) { + tests := []struct { + name string + kafka *Kafka + metric telegraf.Metric + check func(t *testing.T, routingKey string) + }{ + { + name: "static routing key", + kafka: &Kafka{ + RoutingKey: "static", + }, + metric: func() telegraf.Metric { + m, _ := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ) + return m + }(), + check: func(t *testing.T, routingKey string) { + require.Equal(t, "static", routingKey) + }, + }, + { + name: "random routing key", + kafka: &Kafka{ + RoutingKey: "random", + }, + metric: func() telegraf.Metric { + m, _ := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ) + return m + }(), + check: func(t *testing.T, routingKey string) { + require.Equal(t, 36, len(routingKey)) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + key := tt.kafka.routingKey(tt.metric) + tt.check(t, key) + }) + } +} diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go index d77ff08a56c82..0143791460768 100644 --- a/plugins/outputs/kinesis/kinesis.go +++ b/plugins/outputs/kinesis/kinesis.go @@ -17,13 +17,14 @@ import ( type ( KinesisOutput struct { - Region string `toml:"region"` - AccessKey string `toml:"access_key"` - SecretKey string `toml:"secret_key"` - RoleARN string `toml:"role_arn"` - Profile string `toml:"profile"` - Filename string `toml:"shared_credential_file"` - Token string `toml:"token"` + Region string `toml:"region"` + AccessKey string `toml:"access_key"` + SecretKey string `toml:"secret_key"` + RoleARN string `toml:"role_arn"` + Profile string `toml:"profile"` + Filename string `toml:"shared_credential_file"` + Token string `toml:"token"` + EndpointURL string `toml:"endpoint_url"` StreamName string `toml:"streamname"` PartitionKey string `toml:"partitionkey"` @@ -60,6 +61,12 @@ var sampleConfig = ` #profile = "" #shared_credential_file = "" + ## Endpoint to make request against, the correct endpoint is automatically + ## determined and this option should only be set if you wish to override the + ## default. + ## ex: endpoint_url = "http://localhost:8000" + # endpoint_url = "" + ## Kinesis StreamName must exist prior to starting telegraf. streamname = "StreamName" ## DEPRECATED: PartitionKey as used for sharding data. 
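As a usage sketch for the `endpoint_url` option added here and in the cloudwatch output (hedged; the URL below is illustrative, e.g. a local Kinesis-compatible emulator), the option stays empty in normal use so the SDK derives the endpoint from `region`:

```toml
[[outputs.kinesis]]
  region = "us-east-1"
  streamname = "StreamName"
  ## Override only when not targeting the public AWS endpoint:
  endpoint_url = "http://localhost:4567"
```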
@@ -126,13 +133,14 @@ func (k *KinesisOutput) Connect() error { } credentialConfig := &internalaws.CredentialConfig{ - Region: k.Region, - AccessKey: k.AccessKey, - SecretKey: k.SecretKey, - RoleARN: k.RoleARN, - Profile: k.Profile, - Filename: k.Filename, - Token: k.Token, + Region: k.Region, + AccessKey: k.AccessKey, + SecretKey: k.SecretKey, + RoleARN: k.RoleARN, + Profile: k.Profile, + Filename: k.Filename, + Token: k.Token, + EndpointURL: k.EndpointURL, } configProvider := credentialConfig.Credentials() svc := kinesis.New(configProvider) diff --git a/plugins/outputs/prometheus_client/README.md b/plugins/outputs/prometheus_client/README.md index 6cb0cc59e1817..d68cafe9d7899 100644 --- a/plugins/outputs/prometheus_client/README.md +++ b/plugins/outputs/prometheus_client/README.md @@ -18,6 +18,9 @@ This plugin starts a [Prometheus](https://prometheus.io/) Client, it exposes all basic_username = "Foo" basic_password = "Bar" + # IP Ranges which are allowed to access metrics + ip_range = ["192.168.0.0/24", "192.168.1.0/30"] + # Path to publish the metrics on, defaults to /metrics path = "/metrics" diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index b82c72cf05810..9634e92275ece 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -5,6 +5,7 @@ import ( "crypto/subtle" "fmt" "log" + "net" "net/http" "os" "regexp" @@ -58,6 +59,7 @@ type PrometheusClient struct { TLSKey string `toml:"tls_key"` BasicUsername string `toml:"basic_username"` BasicPassword string `toml:"basic_password"` + IPRange []string `toml:"ip_range"` ExpirationInterval internal.Duration `toml:"expiration_interval"` Path string `toml:"path"` CollectorsExclude []string `toml:"collectors_exclude"` @@ -84,6 +86,9 @@ var sampleConfig = ` #basic_username = "Foo" #basic_password = "Bar" + ## IP Ranges which are allowed to access metrics + #ip_range = ["192.168.0.0/24", "192.168.1.0/30"] + ## Interval to expire metrics and not deliver to prometheus, 0 == no expiration # expiration_interval = "60s" @@ -96,7 +101,7 @@ var sampleConfig = ` string_as_label = true ` -func (p *PrometheusClient) basicAuth(h http.Handler) http.Handler { +func (p *PrometheusClient) auth(h http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if p.BasicUsername != "" && p.BasicPassword != "" { w.Header().Set("WWW-Authenticate", `Basic realm="Restricted"`) @@ -110,6 +115,27 @@ func (p *PrometheusClient) basicAuth(h http.Handler) http.Handler { } } + if len(p.IPRange) > 0 { + matched := false + remoteIPs, _, _ := net.SplitHostPort(r.RemoteAddr) + remoteIP := net.ParseIP(remoteIPs) + for _, iprange := range p.IPRange { + _, ipNet, err := net.ParseCIDR(iprange) + if err != nil { + http.Error(w, "Config Error in ip_range setting", 500) + return + } + if ipNet.Contains(remoteIP) { + matched = true + break + } + } + if !matched { + http.Error(w, "Not authorized", 401) + return + } + } + h.ServeHTTP(w, r) }) } @@ -146,7 +172,7 @@ func (p *PrometheusClient) Start() error { } mux := http.NewServeMux() - mux.Handle(p.Path, p.basicAuth(promhttp.HandlerFor( + mux.Handle(p.Path, p.auth(promhttp.HandlerFor( registry, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError}))) p.server = &http.Server{ diff --git a/plugins/outputs/wavefront/wavefront.go b/plugins/outputs/wavefront/wavefront.go index 18c5a6495baaa..ef36d1804045f 100644 --- 
a/plugins/outputs/wavefront/wavefront.go +++ b/plugins/outputs/wavefront/wavefront.go @@ -189,26 +189,32 @@ func buildTags(mTags map[string]string, w *Wavefront) (string, map[string]string } var source string - sourceTagFound := false - - for _, s := range w.SourceOverride { - for k, v := range mTags { - if k == s { - source = v - mTags["telegraf_host"] = mTags["host"] - sourceTagFound = true - delete(mTags, k) + + if s, ok := mTags["source"]; ok { + source = s + delete(mTags, "source") + } else { + sourceTagFound := false + for _, s := range w.SourceOverride { + for k, v := range mTags { + if k == s { + source = v + mTags["telegraf_host"] = mTags["host"] + sourceTagFound = true + delete(mTags, k) + break + } + } + if sourceTagFound { break } } - if sourceTagFound { - break + + if !sourceTagFound { + source = mTags["host"] + } } - if !sourceTagFound { - source = mTags["host"] - } delete(mTags, "host") return tagValueReplacer.Replace(source), mTags diff --git a/plugins/parsers/grok/parser.go b/plugins/parsers/grok/parser.go index e17f127fcfee6..bc65588eb9841 100644 --- a/plugins/parsers/grok/parser.go +++ b/plugins/parsers/grok/parser.go @@ -2,6 +2,7 @@ package grok import ( "bufio" + "bytes" "fmt" "log" "os" @@ -37,6 +38,7 @@ var timeLayouts = map[string]string{ } const ( + MEASUREMENT = "measurement" INT = "int" TAG = "tag" FLOAT = "float" @@ -216,7 +218,6 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { if k == "" || v == "" { continue } - // t is the modifier of the field var t string // check if pattern has some modifiers @@ -238,6 +239,8 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { } switch t { + case MEASUREMENT: + p.Measurement = v case INT: iv, err := strconv.ParseInt(v, 10, 64) if err != nil { @@ -349,25 +352,27 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { } if len(fields) == 0 { - return nil, fmt.Errorf("logparser_grok: must have one or more fields") + return nil, fmt.Errorf("grok: must have one or more fields") } return metric.New(p.Measurement, tags, fields, p.tsModder.tsMod(timestamp)) } func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { - scanner := bufio.NewScanner(strings.NewReader(string(buf))) - var lines []string - for scanner.Scan() { - lines = append(lines, scanner.Text()) - } - var metrics []telegraf.Metric - for _, line := range lines { + metrics := make([]telegraf.Metric, 0) + + scanner := bufio.NewScanner(bytes.NewReader(buf)) + for scanner.Scan() { + line := scanner.Text() m, err := p.ParseLine(line) if err != nil { return nil, err } + + if m == nil { + continue + } metrics = append(metrics, m) } diff --git a/plugins/parsers/grok/parser_test.go b/plugins/parsers/grok/parser_test.go index 09f8fa16d89b5..8133d30212156 100644 --- a/plugins/parsers/grok/parser_test.go +++ b/plugins/parsers/grok/parser_test.go @@ -1,6 +1,7 @@ package grok import ( + "log" "testing" "time" @@ -959,3 +960,52 @@ func TestReplaceTimestampComma(t *testing.T) { //Convert nanoseconds to milliseconds for comparison require.Equal(t, 555, m.Time().Nanosecond()/1000000) } + +func TestDynamicMeasurementModifier(t *testing.T) { + p := &Parser{ + Patterns: []string{"%{TEST}"}, + CustomPatterns: "TEST %{NUMBER:var1:tag} %{NUMBER:var2:float} %{WORD:test:measurement}", + } + + require.NoError(t, p.Compile()) + m, err := p.ParseLine("4 5 hello") + require.NoError(t, err) + require.Equal(t, m.Name(), "hello") +} + +func TestStaticMeasurementModifier(t *testing.T) { + p := &Parser{ + Patterns: []string{"%{WORD:hi:measurement}
diff --git a/plugins/parsers/grok/parser_test.go b/plugins/parsers/grok/parser_test.go
index 09f8fa16d89b5..8133d30212156 100644
--- a/plugins/parsers/grok/parser_test.go
+++ b/plugins/parsers/grok/parser_test.go
@@ -1,6 +1,7 @@
 package grok
 
 import (
+	"log"
 	"testing"
 	"time"
 
@@ -959,3 +960,52 @@ func TestReplaceTimestampComma(t *testing.T) {
 	//Convert Nanosecond to milisecond for compare
 	require.Equal(t, 555, m.Time().Nanosecond()/1000000)
 }
+
+func TestDynamicMeasurementModifier(t *testing.T) {
+	p := &Parser{
+		Patterns:       []string{"%{TEST}"},
+		CustomPatterns: "TEST %{NUMBER:var1:tag} %{NUMBER:var2:float} %{WORD:test:measurement}",
+	}
+
+	require.NoError(t, p.Compile())
+	m, err := p.ParseLine("4 5 hello")
+	require.NoError(t, err)
+	require.Equal(t, m.Name(), "hello")
+}
+
+func TestStaticMeasurementModifier(t *testing.T) {
+	p := &Parser{
+		Patterns: []string{"%{WORD:hi:measurement} %{NUMBER:num:string}"},
+	}
+
+	require.NoError(t, p.Compile())
+	m, err := p.ParseLine("test_name 42")
+	log.Printf("%v", m)
+	require.NoError(t, err)
+	require.Equal(t, "test_name", m.Name())
+}
+
+// tests that the top level measurement name is used
+func TestTwoMeasurementModifier(t *testing.T) {
+	p := &Parser{
+		Patterns:       []string{"%{TEST:test_name:measurement}"},
+		CustomPatterns: "TEST %{NUMBER:var1:tag} %{NUMBER:var2:measurement} %{WORD:var3:measurement}",
+	}
+
+	require.NoError(t, p.Compile())
+	m, err := p.ParseLine("4 5 hello")
+	require.NoError(t, err)
+	require.Equal(t, m.Name(), "4 5 hello")
+}
+
+func TestMeasurementModifierNoName(t *testing.T) {
+	p := &Parser{
+		Patterns:       []string{"%{TEST}"},
+		CustomPatterns: "TEST %{NUMBER:var1:tag} %{NUMBER:var2:float} %{WORD:hi:measurement}",
+	}
+
+	require.NoError(t, p.Compile())
+	m, err := p.ParseLine("4 5 hello")
+	require.NoError(t, err)
+	require.Equal(t, m.Name(), "hello")
+}
diff --git a/plugins/parsers/logfmt/parser.go b/plugins/parsers/logfmt/parser.go
new file mode 100644
index 0000000000000..603dbbae862b9
--- /dev/null
+++ b/plugins/parsers/logfmt/parser.go
@@ -0,0 +1,111 @@
+package logfmt
+
+import (
+	"bytes"
+	"fmt"
+	"strconv"
+	"time"
+
+	"github.com/go-logfmt/logfmt"
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/metric"
+)
+
+var (
+	ErrNoMetric = fmt.Errorf("no metric in line")
+)
+
+// Parser decodes logfmt formatted messages into metrics.
+type Parser struct {
+	MetricName  string
+	DefaultTags map[string]string
+	Now         func() time.Time
+}
+
+// NewParser creates a parser.
+func NewParser(metricName string, defaultTags map[string]string) *Parser {
+	return &Parser{
+		MetricName:  metricName,
+		DefaultTags: defaultTags,
+		Now:         time.Now,
+	}
+}
+
+// Parse converts a slice of bytes in logfmt format to metrics.
+func (p *Parser) Parse(b []byte) ([]telegraf.Metric, error) {
+	reader := bytes.NewReader(b)
+	decoder := logfmt.NewDecoder(reader)
+	metrics := make([]telegraf.Metric, 0)
+	for {
+		ok := decoder.ScanRecord()
+		if !ok {
+			err := decoder.Err()
+			if err != nil {
+				return nil, err
+			}
+			break
+		}
+		fields := make(map[string]interface{})
+		for decoder.ScanKeyval() {
+			if string(decoder.Value()) == "" {
+				continue
+			}
+
+			// type conversions
+			value := string(decoder.Value())
+			if iValue, err := strconv.ParseInt(value, 10, 64); err == nil {
+				fields[string(decoder.Key())] = iValue
+			} else if fValue, err := strconv.ParseFloat(value, 64); err == nil {
+				fields[string(decoder.Key())] = fValue
+			} else if bValue, err := strconv.ParseBool(value); err == nil {
+				fields[string(decoder.Key())] = bValue
+			} else {
+				fields[string(decoder.Key())] = value
+			}
+		}
+		if len(fields) == 0 {
+			continue
+		}
+
+		m, err := metric.New(p.MetricName, map[string]string{}, fields, p.Now())
+		if err != nil {
+			return nil, err
+		}
+
+		metrics = append(metrics, m)
+	}
+	p.applyDefaultTags(metrics)
+	return metrics, nil
+}
+
+// ParseLine converts a single line of text in logfmt format to a metric.
+func (p *Parser) ParseLine(s string) (telegraf.Metric, error) {
+	metrics, err := p.Parse([]byte(s))
+	if err != nil {
+		return nil, err
+	}
+
+	if len(metrics) < 1 {
+		return nil, ErrNoMetric
+	}
+	return metrics[0], nil
+}
+
+// SetDefaultTags adds tags to the metrics outputs of Parse and ParseLine.
+func (p *Parser) SetDefaultTags(tags map[string]string) {
+	p.DefaultTags = tags
+}
+
+func (p *Parser) applyDefaultTags(metrics []telegraf.Metric) {
+	if len(p.DefaultTags) == 0 {
+		return
+	}
+
+	for _, m := range metrics {
+		for k, v := range p.DefaultTags {
+			if !m.HasTag(k) {
+				m.AddTag(k, v)
+			}
+		}
+	}
+}
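The conversion chain above decides each field's type by trying int64, then float64, then bool, before falling back to string. A standalone sketch of that precedence (the `convert` helper is illustrative, not part of the patch):

```go
package main

import (
	"fmt"
	"strconv"
)

// convert applies the same precedence the logfmt parser uses:
// int64 first, then float64, then bool, falling back to string.
func convert(value string) interface{} {
	if iValue, err := strconv.ParseInt(value, 10, 64); err == nil {
		return iValue
	}
	if fValue, err := strconv.ParseFloat(value, 64); err == nil {
		return fValue
	}
	if bValue, err := strconv.ParseBool(value); err == nil {
		return bValue
	}
	return value
}

func main() {
	for _, v := range []string{"5", "7.45", "true", "POST"} {
		fmt.Printf("%q -> %T\n", v, convert(v))
	}
	// "5" -> int64, "7.45" -> float64, "true" -> bool, "POST" -> string
}
```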
diff --git a/plugins/parsers/logfmt/parser_test.go b/plugins/parsers/logfmt/parser_test.go
new file mode 100644
index 0000000000000..c9096468467dc
--- /dev/null
+++ b/plugins/parsers/logfmt/parser_test.go
@@ -0,0 +1,231 @@
+package logfmt
+
+import (
+	"testing"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/metric"
+	"github.com/influxdata/telegraf/testutil"
+	"github.com/stretchr/testify/require"
+)
+
+func MustMetric(t *testing.T, m *testutil.Metric) telegraf.Metric {
+	t.Helper()
+	v, err := metric.New(m.Measurement, m.Tags, m.Fields, m.Time)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return v
+}
+
+func TestParse(t *testing.T) {
+	tests := []struct {
+		name        string
+		measurement string
+		now         func() time.Time
+		bytes       []byte
+		want        []testutil.Metric
+		wantErr     bool
+	}{
+		{
+			name: "no bytes returns no metrics",
+			now:  func() time.Time { return time.Unix(0, 0) },
+			want: []testutil.Metric{},
+		},
+		{
+			name:        "test without trailing end",
+			bytes:       []byte("foo=\"bar\""),
+			now:         func() time.Time { return time.Unix(0, 0) },
+			measurement: "testlog",
+			want: []testutil.Metric{
+				testutil.Metric{
+					Measurement: "testlog",
+					Tags:        map[string]string{},
+					Fields: map[string]interface{}{
+						"foo": "bar",
+					},
+					Time: time.Unix(0, 0),
+				},
+			},
+		},
+		{
+			name:        "test with trailing end",
+			bytes:       []byte("foo=\"bar\"\n"),
+			now:         func() time.Time { return time.Unix(0, 0) },
+			measurement: "testlog",
+			want: []testutil.Metric{
+				testutil.Metric{
+					Measurement: "testlog",
+					Tags:        map[string]string{},
+					Fields: map[string]interface{}{
+						"foo": "bar",
+					},
+					Time: time.Unix(0, 0),
+				},
+			},
+		},
+		{
+			name:        "logfmt parser returns all the fields",
+			bytes:       []byte(`ts=2018-07-24T19:43:40.275Z lvl=info msg="http request" method=POST`),
+			now:         func() time.Time { return time.Unix(0, 0) },
+			measurement: "testlog",
+			want: []testutil.Metric{
+				testutil.Metric{
+					Measurement: "testlog",
+					Tags:        map[string]string{},
+					Fields: map[string]interface{}{
+						"lvl":    "info",
+						"msg":    "http request",
+						"method": "POST",
+						"ts":     "2018-07-24T19:43:40.275Z",
+					},
+					Time: time.Unix(0, 0),
+				},
+			},
+		},
+		{
+			name:        "logfmt parser parses every line",
+			bytes:       []byte("ts=2018-07-24T19:43:40.275Z lvl=info msg=\"http request\" method=POST\nparent_id=088876RL000 duration=7.45 log_id=09R4e4Rl000"),
+			now:         func() time.Time { return time.Unix(0, 0) },
+			measurement: "testlog",
+			want: []testutil.Metric{
+				testutil.Metric{
+					Measurement: "testlog",
+					Tags:        map[string]string{},
+					Fields: map[string]interface{}{
+						"lvl":    "info",
+						"msg":    "http request",
+						"method": "POST",
+						"ts":     "2018-07-24T19:43:40.275Z",
+					},
+					Time: time.Unix(0, 0),
+				},
+				testutil.Metric{
+					Measurement: "testlog",
+					Tags:        map[string]string{},
+					Fields: map[string]interface{}{
+						"parent_id": "088876RL000",
+						"duration":  7.45,
+						"log_id":    "09R4e4Rl000",
+					},
+					Time: time.Unix(0, 0),
+				},
+			},
+		},
+		{
+			name:    "keys without = or values are ignored",
+			now:     func() time.Time { return time.Unix(0, 0) },
+			bytes:   []byte(`i am no data.`),
+			want:    []testutil.Metric{},
+			wantErr: false,
+		},
+		{
+			name:    "keys without values are ignored",
+			now:     func() time.Time { return time.Unix(0, 0) },
+			bytes:   []byte(`foo="" bar=`),
+			want:    []testutil.Metric{},
+			wantErr: false,
+		},
+		{
+			name:        "unterminated quote produces error",
+			now:         func() time.Time { return time.Unix(0, 0) },
+			measurement: "testlog",
+			bytes:       []byte(`bar=baz foo="bar`),
+			want:        []testutil.Metric{},
+			wantErr:     true,
+		},
+		{
+			name:        "malformed key",
+			now:         func() time.Time { return time.Unix(0, 0) },
+			measurement: "testlog",
+			bytes:       []byte(`"foo=" bar=baz`),
+			want:        []testutil.Metric{},
+			wantErr:     true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			l := Parser{
+				MetricName: tt.measurement,
+				Now:        tt.now,
+			}
+			got, err := l.Parse(tt.bytes)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("Logfmt.Parse error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			require.Equal(t, len(tt.want), len(got))
+			for i, m := range got {
+				testutil.MustEqual(t, m, tt.want[i])
+			}
+		})
+	}
+}
+
+func TestParseLine(t *testing.T) {
+	tests := []struct {
+		name        string
+		s           string
+		measurement string
+		now         func() time.Time
+		want        testutil.Metric
+		wantErr     bool
+	}{
+		{
+			name:    "No Metric In line",
+			now:     func() time.Time { return time.Unix(0, 0) },
+			want:    testutil.Metric{},
+			wantErr: true,
+		},
+		{
+			name:        "Log parser fmt returns all fields",
+			now:         func() time.Time { return time.Unix(0, 0) },
+			measurement: "testlog",
+			s:           `ts=2018-07-24T19:43:35.207268Z lvl=5 msg="Write failed" log_id=09R4e4Rl000`,
+			want: testutil.Metric{
+				Measurement: "testlog",
+				Fields: map[string]interface{}{
+					"ts":     "2018-07-24T19:43:35.207268Z",
+					"lvl":    int64(5),
+					"msg":    "Write failed",
+					"log_id": "09R4e4Rl000",
+				},
+				Tags: map[string]string{},
+				Time: time.Unix(0, 0),
+			},
+		},
+		{
+			name:        "ParseLine only returns metrics from first string",
+			now:         func() time.Time { return time.Unix(0, 0) },
+			measurement: "testlog",
+			s:           "ts=2018-07-24T19:43:35.207268Z lvl=5 msg=\"Write failed\" log_id=09R4e4Rl000\nmethod=POST parent_id=088876RL000 duration=7.45 log_id=09R4e4Rl000",
+			want: testutil.Metric{
+				Measurement: "testlog",
+				Fields: map[string]interface{}{
+					"ts":     "2018-07-24T19:43:35.207268Z",
+					"lvl":    int64(5),
+					"msg":    "Write failed",
+					"log_id": "09R4e4Rl000",
+				},
+				Tags: map[string]string{},
+				Time: time.Unix(0, 0),
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			l := Parser{
+				MetricName: tt.measurement,
+				Now:        tt.now,
+			}
+			got, err := l.ParseLine(tt.s)
+			if (err != nil) != tt.wantErr {
+				t.Fatalf("Logfmt.Parse error = %v, wantErr %v", err, tt.wantErr)
+			}
+			if got != nil {
+				testutil.MustEqual(t, got, tt.want)
+			}
+		})
+	}
+}
diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go
index 24e73d4b63ca6..e198cb2cb96c6 100644
--- a/plugins/parsers/registry.go
+++ b/plugins/parsers/registry.go
@@ -11,8 +11,10 @@ import (
 	"github.com/influxdata/telegraf/plugins/parsers/grok"
 	"github.com/influxdata/telegraf/plugins/parsers/influx"
 	"github.com/influxdata/telegraf/plugins/parsers/json"
+	"github.com/influxdata/telegraf/plugins/parsers/logfmt"
 	"github.com/influxdata/telegraf/plugins/parsers/nagios"
 	"github.com/influxdata/telegraf/plugins/parsers/value"
+	"github.com/influxdata/telegraf/plugins/parsers/wavefront"
 )
 
 // ParserInput is an interface for input plugins that are able to parse
@@ -131,6 +133,8 @@ func NewParser(config *Config) (Parser, error) {
 			config.DefaultTags,
 			config.Separator,
 			config.Templates)
+	case "wavefront":
+		parser, err = NewWavefrontParser(config.DefaultTags)
 	case "grok":
 		parser, err = newGrokParser(
 			config.MetricName,
@@ -139,6 +143,8 @@ func NewParser(config *Config) (Parser, error) {
 			config.GrokCustomPatterns,
 			config.GrokCustomPatternFiles,
 			config.GrokTimeZone)
+	case "logfmt":
+		parser, err = NewLogFmtParser(config.MetricName, config.DefaultTags)
 	default:
 		err = fmt.Errorf("Invalid data format: %s", config.DataFormat)
 	}
@@ -238,3 +244,12 @@ func NewDropwizardParser(
 	}
 	return parser, err
 }
+
+// NewLogFmtParser returns a logfmt parser with the default options.
+func NewLogFmtParser(metricName string, defaultTags map[string]string) (Parser, error) {
+	return logfmt.NewParser(metricName, defaultTags), nil
+}
+
+func NewWavefrontParser(defaultTags map[string]string) (Parser, error) {
+	return wavefront.NewWavefrontParser(defaultTags), nil
+}
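With these registry entries, the new formats are selectable by `data_format` name. A sketch of going through the registry (only the `DataFormat` and `MetricName` fields of `Config` are used, as in this file; everything else is left at its zero value):

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/plugins/parsers"
)

func main() {
	// Select the new logfmt parser by data format name.
	parser, err := parsers.NewParser(&parsers.Config{
		DataFormat: "logfmt",
		MetricName: "testlog",
	})
	if err != nil {
		panic(err)
	}

	m, err := parser.ParseLine(`lvl=info msg="http request" method=POST`)
	if err != nil {
		panic(err)
	}
	fmt.Println(m.Name(), m.Fields()["method"]) // testlog POST
}
```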
diff --git a/plugins/parsers/wavefront/element.go b/plugins/parsers/wavefront/element.go
new file mode 100644
index 0000000000000..4e40238e74e44
--- /dev/null
+++ b/plugins/parsers/wavefront/element.go
@@ -0,0 +1,238 @@
+package wavefront
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"time"
+)
+
+var (
+	ErrEOF              = errors.New("EOF")
+	ErrInvalidTimestamp = errors.New("Invalid timestamp")
+)
+
+// ElementParser is the interface for parsing line elements.
+type ElementParser interface {
+	parse(p *PointParser, pt *Point) error
+}
+
+type NameParser struct{}
+type ValueParser struct{}
+type TimestampParser struct {
+	optional bool
+}
+type WhiteSpaceParser struct {
+	nextOptional bool
+}
+type TagParser struct{}
+type LoopedParser struct {
+	wrappedParser ElementParser
+	wsParser      *WhiteSpaceParser
+}
+type LiteralParser struct {
+	literal string
+}
+
+func (ep *NameParser) parse(p *PointParser, pt *Point) error {
+	// Valid characters are: a-z, A-Z, 0-9, hyphen ("-"), underscore ("_"), dot (".").
+	// Forward slash ("/") and comma (",") are allowed if metricName is enclosed in double quotes.
+	name, err := parseLiteral(p)
+	if err != nil {
+		return err
+	}
+	pt.Name = name
+	return nil
+}
+
+func (ep *ValueParser) parse(p *PointParser, pt *Point) error {
+	tok, lit := p.scan()
+	if tok == EOF {
+		return fmt.Errorf("found %q, expected number", lit)
+	}
+
+	p.writeBuf.Reset()
+	if tok == MINUS_SIGN {
+		p.writeBuf.WriteString(lit)
+		tok, lit = p.scan()
+	}
+
+	for tok != EOF && (tok == LETTER || tok == NUMBER || tok == DOT) {
+		p.writeBuf.WriteString(lit)
+		tok, lit = p.scan()
+	}
+	p.unscan()
+
+	pt.Value = p.writeBuf.String()
+	_, err := strconv.ParseFloat(pt.Value, 64)
+	if err != nil {
+		return fmt.Errorf("invalid metric value %s", pt.Value)
+	}
+	return nil
+}
+
+func (ep *TimestampParser) parse(p *PointParser, pt *Point) error {
+	tok, lit := p.scan()
+	if tok == EOF {
+		if ep.optional {
+			p.unscanTokens(2)
+			return setTimestamp(pt, 0, 1)
+		}
+		return fmt.Errorf("found %q, expected number", lit)
+	}
+
+	if tok != NUMBER {
+		if ep.optional {
+			p.unscanTokens(2)
+			return setTimestamp(pt, 0, 1)
+		}
+		return ErrInvalidTimestamp
+	}
+
+	p.writeBuf.Reset()
+	for tok != EOF && tok == NUMBER {
+		p.writeBuf.WriteString(lit)
+		tok, lit = p.scan()
+	}
+	p.unscan()
+
+	tsStr := p.writeBuf.String()
+	ts, err := strconv.ParseInt(tsStr, 10, 64)
+	if err != nil {
+		return err
+	}
+	return setTimestamp(pt, ts, len(tsStr))
+}
+
+func setTimestamp(pt *Point, ts int64, numDigits int) error {
+
+	if numDigits == 19 {
+		// nanoseconds
+		ts = ts / 1e9
+	} else if numDigits == 16 {
+		// microseconds
+		ts = ts / 1e6
+	} else if numDigits == 13 {
+		// milliseconds
+		ts = ts / 1e3
+	} else if numDigits != 10 {
+		// must be in seconds, return error if not 0
+		if ts == 0 {
+			ts = getCurrentTime()
+		} else {
+			return ErrInvalidTimestamp
+		}
+	}
+	pt.Timestamp = ts
+	return nil
+}
+
+func (ep *LoopedParser) parse(p *PointParser, pt *Point) error {
+	for {
+		err := ep.wrappedParser.parse(p, pt)
+		if err != nil {
+			return err
+		}
+		err = ep.wsParser.parse(p, pt)
+		if err == ErrEOF {
+			break
+		}
+	}
+	return nil
+}
+
+func (ep *TagParser) parse(p *PointParser, pt *Point) error {
+	k, err := parseLiteral(p)
+	if err != nil {
+		if k == "" {
+			return nil
+		}
+		return err
+	}
+
+	next, lit := p.scan()
+	if next != EQUALS {
+		return fmt.Errorf("found %q, expected equals", lit)
+	}
+
+	v, err := parseLiteral(p)
+	if err != nil {
+		return err
+	}
+	if len(pt.Tags) == 0 {
+		pt.Tags = make(map[string]string)
+	}
+	pt.Tags[k] = v
+	return nil
+}
+
+func (ep *WhiteSpaceParser) parse(p *PointParser, pt *Point) error {
+	tok := WS
+	for tok != EOF && tok == WS {
+		tok, _ = p.scan()
+	}
+
+	if tok == EOF {
+		if !ep.nextOptional {
+			return ErrEOF
+		}
+		return nil
+	}
+	p.unscan()
+	return nil
+}
+
+func (ep *LiteralParser) parse(p *PointParser, pt *Point) error {
+	l, err := parseLiteral(p)
+	if err != nil {
+		return err
+	}
+
+	if l != ep.literal {
+		return fmt.Errorf("found %s, expected %s", l, ep.literal)
+	}
+	return nil
+}
+
+func parseQuotedLiteral(p *PointParser) (string, error) {
+	p.writeBuf.Reset()
+
+	escaped := false
+	tok, lit := p.scan()
+	for tok != EOF && (tok != QUOTES || (tok == QUOTES && escaped)) {
+		// let everything through
+		escaped = tok == BACKSLASH
+		p.writeBuf.WriteString(lit)
+		tok, lit = p.scan()
+	}
+	if tok == EOF {
+		return "", fmt.Errorf("found %q, expected quotes", lit)
+	}
+	return p.writeBuf.String(), nil
+}
+
+func parseLiteral(p *PointParser) (string, error) {
+	tok, lit := p.scan()
+	if tok == EOF {
+		return "", fmt.Errorf("found %q, expected literal", lit)
+	}
+
+	if tok == QUOTES {
+		return parseQuotedLiteral(p)
+	}
+
+	p.writeBuf.Reset()
+	for tok != EOF && tok > literal_beg && tok < literal_end {
+		p.writeBuf.WriteString(lit)
+		tok, lit = p.scan()
+	}
+	if tok == QUOTES {
+		return "", errors.New("found quote inside unquoted literal")
+	}
+	p.unscan()
+	return p.writeBuf.String(), nil
+}
+
+func getCurrentTime() int64 {
+	return time.Now().UnixNano() / 1e9
+}
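`setTimestamp` infers the epoch precision from the digit count: 19 digits means nanoseconds, 16 microseconds, 13 milliseconds, and 10 plain seconds. A standalone sketch of that normalization (the `normalizeToSeconds` helper is illustrative, not part of the patch):

```go
package main

import "fmt"

// normalizeToSeconds mirrors setTimestamp's digit-count heuristic:
// 19 digits = ns, 16 = µs, 13 = ms, 10 = already seconds.
func normalizeToSeconds(ts int64, numDigits int) int64 {
	switch numDigits {
	case 19:
		return ts / 1e9
	case 16:
		return ts / 1e6
	case 13:
		return ts / 1e3
	default:
		return ts
	}
}

func main() {
	fmt.Println(normalizeToSeconds(1530939936000000000, 19)) // 1530939936
	fmt.Println(normalizeToSeconds(1530939936000, 13))       // 1530939936
}
```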
diff --git a/plugins/parsers/wavefront/parser.go b/plugins/parsers/wavefront/parser.go
new file mode 100644
index 0000000000000..f5fc88dbfe570
--- /dev/null
+++ b/plugins/parsers/wavefront/parser.go
@@ -0,0 +1,203 @@
+package wavefront
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+	"log"
+	"strconv"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/metric"
+)
+
+const MAX_BUFFER_SIZE = 2
+
+type Point struct {
+	Name      string
+	Value     string
+	Timestamp int64
+	Source    string
+	Tags      map[string]string
+}
+
+// PointParser is a parser for Wavefront data points.
+type PointParser struct {
+	s   *PointScanner
+	buf struct {
+		tok []Token  // last read n tokens
+		lit []string // last read n literals
+		n   int      // unscanned buffer size (max=2)
+	}
+	scanBuf     bytes.Buffer // buffer reused for scanning tokens
+	writeBuf    bytes.Buffer // buffer reused for parsing elements
+	Elements    []ElementParser
+	defaultTags map[string]string
+}
+
+// NewWavefrontElements returns a slice of ElementParsers for the Wavefront format.
+func NewWavefrontElements() []ElementParser {
+	var elements []ElementParser
+	wsParser := WhiteSpaceParser{}
+	wsParserNextOpt := WhiteSpaceParser{nextOptional: true}
+	repeatParser := LoopedParser{wrappedParser: &TagParser{}, wsParser: &wsParser}
+	elements = append(elements, &NameParser{}, &wsParser, &ValueParser{}, &wsParserNextOpt,
+		&TimestampParser{optional: true}, &wsParserNextOpt, &repeatParser)
+	return elements
+}
+
+func NewWavefrontParser(defaultTags map[string]string) *PointParser {
+	elements := NewWavefrontElements()
+	return &PointParser{Elements: elements, defaultTags: defaultTags}
+}
+
+func (p *PointParser) Parse(buf []byte) ([]telegraf.Metric, error) {
+
+	// parse even if the buffer begins with a newline
+	buf = bytes.TrimPrefix(buf, []byte("\n"))
+	// add a trailing newline if one is missing
+	if len(buf) > 0 && !bytes.HasSuffix(buf, []byte("\n")) {
+		buf = append(buf, []byte("\n")...)
+	}
+
+	points := make([]Point, 0)
+
+	buffer := bytes.NewBuffer(buf)
+	reader := bufio.NewReader(buffer)
+	for {
+		// Read up to the next newline.
+		buf, err := reader.ReadBytes('\n')
+		if err == io.EOF {
+			break
+		}
+
+		p.reset(buf)
+		point := Point{}
+		for _, element := range p.Elements {
+			err := element.parse(p, &point)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		points = append(points, point)
+	}
+
+	metrics, err := p.convertPointToTelegrafMetric(points)
+	if err != nil {
+		return nil, err
+	}
+	return metrics, nil
+}
+
+func (p *PointParser) ParseLine(line string) (telegraf.Metric, error) {
+	buf := []byte(line)
+	metrics, err := p.Parse(buf)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(metrics) > 0 {
+		return metrics[0], nil
+	}
+
+	return nil, nil
+}
+
+func (p *PointParser) SetDefaultTags(tags map[string]string) {
+	p.defaultTags = tags
+}
+
+func (p *PointParser) convertPointToTelegrafMetric(points []Point) ([]telegraf.Metric, error) {
+
+	metrics := make([]telegraf.Metric, 0)
+
+	for _, point := range points {
+		tags := make(map[string]string)
+		for k, v := range point.Tags {
+			tags[k] = v
+		}
+		// apply default tags after parsed tags
+		for k, v := range p.defaultTags {
+			tags[k] = v
+		}
+
+		// single field for value
+		fields := make(map[string]interface{})
+		v, err := strconv.ParseFloat(point.Value, 64)
+		if err != nil {
+			return nil, err
+		}
+		fields["value"] = v
+
+		m, err := metric.New(point.Name, tags, fields, time.Unix(point.Timestamp, 0))
+		if err != nil {
+			return nil, err
+		}
+
+		metrics = append(metrics, m)
+	}
+
+	return metrics, nil
+}
+
+// scan returns the next token from the underlying scanner.
+// If a token has been unscanned then read that from the internal buffer instead.
+func (p *PointParser) scan() (Token, string) {
+	// If we have a token on the buffer, then return it.
+	if p.buf.n != 0 {
+		idx := p.buf.n % MAX_BUFFER_SIZE
+		tok, lit := p.buf.tok[idx], p.buf.lit[idx]
+		p.buf.n -= 1
+		return tok, lit
+	}
+
+	// Otherwise read the next token from the scanner.
+	tok, lit := p.s.Scan()
+
+	// Save it to the buffer in case we unscan later.
+	p.buffer(tok, lit)
+
+	return tok, lit
+}
+
+func (p *PointParser) buffer(tok Token, lit string) {
+	// create the buffer if it is empty
+	if len(p.buf.tok) == 0 {
+		p.buf.tok = make([]Token, MAX_BUFFER_SIZE)
+		p.buf.lit = make([]string, MAX_BUFFER_SIZE)
+	}
+
+	// for now assume a simple circular buffer of length two
+	p.buf.tok[0], p.buf.lit[0] = p.buf.tok[1], p.buf.lit[1]
+	p.buf.tok[1], p.buf.lit[1] = tok, lit
+}
+
+// unscan pushes the previously read token back onto the buffer.
+func (p *PointParser) unscan() {
+	p.unscanTokens(1)
+}
+
+func (p *PointParser) unscanTokens(n int) {
+	if n > MAX_BUFFER_SIZE {
+		// just log for now
+		log.Printf("cannot unscan more than %d tokens", MAX_BUFFER_SIZE)
+	}
+	p.buf.n += n
+}
+
+func (p *PointParser) reset(buf []byte) {
+
+	// reset the scan buffer and write the new bytes
+	p.scanBuf.Reset()
+	p.scanBuf.Write(buf)
+
+	if p.s == nil {
+		p.s = NewScanner(&p.scanBuf)
+	} else {
+		// reset p.s.r, passing in the buffer as the reader
+		p.s.r.Reset(&p.scanBuf)
+	}
+	p.buf.n = 0
+}
diff --git a/plugins/parsers/wavefront/parser_test.go b/plugins/parsers/wavefront/parser_test.go
new file mode 100644
index 0000000000000..85367fa1a03de
--- /dev/null
+++ b/plugins/parsers/wavefront/parser_test.go
@@ -0,0 +1,204 @@
+package wavefront
+
+import (
+	"testing"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/metric"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestParse(t *testing.T) {
+	parser := NewWavefrontParser(nil)
+
+	parsedMetrics, err := parser.Parse([]byte("test.metric 1"))
+	assert.NoError(t, err)
+	testMetric, err := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0))
+	assert.NoError(t, err)
+	assert.Equal(t, parsedMetrics[0].Name(), testMetric.Name())
+	assert.Equal(t, parsedMetrics[0].Fields(), testMetric.Fields())
+
+	parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936"))
+	assert.NoError(t, err)
+	testMetric, err = metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0))
+	assert.NoError(t, err)
+	assert.EqualValues(t, parsedMetrics[0], testMetric)
+
+	parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource"))
+	assert.NoError(t, err)
+	testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0))
+	assert.NoError(t, err)
+	assert.EqualValues(t, parsedMetrics[0], testMetric)
+
+	parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 source=\"mysource\""))
+	assert.NoError(t, err)
+	testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0))
+	assert.NoError(t, err)
+	assert.EqualValues(t, parsedMetrics[0], testMetric)
+
+	parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 \"source\"=\"mysource\" tag2=value2"))
+	assert.NoError(t, err)
+	testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0))
+	assert.NoError(t, err)
+	assert.EqualValues(t, parsedMetrics[0], testMetric)
+
+	parsedMetrics, err = parser.Parse([]byte("test.metric 1.1234 1530939936 source=\"mysource\" tag2=value2 "))
+	assert.NoError(t, err)
+	testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0))
+	assert.NoError(t, err)
+	assert.EqualValues(t, parsedMetrics[0], testMetric)
+
+}
+
+func TestParseLine(t *testing.T) {
+	parser := NewWavefrontParser(nil)
+
+	parsedMetric, err := parser.ParseLine("test.metric 1")
+	assert.NoError(t, err)
+	testMetric, err := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0))
+	assert.NoError(t, err)
+	assert.Equal(t, parsedMetric.Name(), testMetric.Name())
+	assert.Equal(t, parsedMetric.Fields(), testMetric.Fields())
+
+	parsedMetric, err = parser.ParseLine("test.metric 1 1530939936")
+	assert.NoError(t, err)
+	testMetric, err = metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0))
+	assert.NoError(t, err)
+	assert.EqualValues(t, parsedMetric, testMetric)
+
+	parsedMetric, err = parser.ParseLine("test.metric 1 1530939936 source=mysource")
+	assert.NoError(t, err)
+	testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0))
+	assert.NoError(t, err)
+	assert.EqualValues(t, parsedMetric, testMetric)
+
+	parsedMetric, err = parser.ParseLine("\"test.metric\" 1.1234 1530939936 source=\"mysource\"")
+	assert.NoError(t, err)
+	testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0))
+	assert.NoError(t, err)
+	assert.EqualValues(t, parsedMetric, testMetric)
+
+	parsedMetric, err = parser.ParseLine("\"test.metric\" 1.1234 1530939936 \"source\"=\"mysource\" tag2=value2")
+	assert.NoError(t, err)
+	testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0))
+	assert.NoError(t, err)
+	assert.EqualValues(t, parsedMetric, testMetric)
+
+	parsedMetric, err = parser.ParseLine("test.metric 1.1234 1530939936 source=\"mysource\" tag2=value2 ")
+	assert.NoError(t, err)
+	testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0))
+	assert.NoError(t, err)
+	assert.EqualValues(t, parsedMetric, testMetric)
+}
+
+func TestParseMultiple(t *testing.T) {
+	parser := NewWavefrontParser(nil)
+
+	parsedMetrics, err := parser.Parse([]byte("test.metric 1\ntest.metric2 2 1530939936"))
+	assert.NoError(t, err)
+	testMetric1, err := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0))
+	assert.NoError(t, err)
+	testMetric2, err := metric.New("test.metric2", map[string]string{}, map[string]interface{}{"value": 2.}, time.Unix(1530939936, 0))
+	assert.NoError(t, err)
+	testMetrics := []telegraf.Metric{testMetric1, testMetric2}
+	assert.Equal(t, parsedMetrics[0].Name(), testMetrics[0].Name())
+	assert.Equal(t, parsedMetrics[0].Fields(), testMetrics[0].Fields())
+	assert.EqualValues(t, parsedMetrics[1], testMetrics[1])
+
+	parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource\n\"test.metric\" 1.1234 1530939936 source=\"mysource\""))
+	assert.NoError(t, err)
+	testMetric1, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0))
+	assert.NoError(t, err)
+	testMetric2, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0))
+	assert.NoError(t, err)
+	testMetrics = []telegraf.Metric{testMetric1, testMetric2}
+	assert.EqualValues(t, parsedMetrics, testMetrics)
+
+	parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 \"source\"=\"mysource\" tag2=value2\ntest.metric 1.1234 1530939936 source=\"mysource\" tag2=value2 "))
+	assert.NoError(t, err)
+	testMetric1, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0))
+	assert.NoError(t, err)
+	testMetric2, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0))
+	assert.NoError(t, err)
+	testMetrics = []telegraf.Metric{testMetric1, testMetric2}
+	assert.EqualValues(t, parsedMetrics, testMetrics)
+
+	parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource\n\"test.metric\" 1.1234 1530939936 source=\"mysource\"\ntest.metric3 333 1530939936 tagit=valueit"))
+	assert.NoError(t, err)
+	testMetric1, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0))
+	assert.NoError(t, err)
+	testMetric2, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0))
+	assert.NoError(t, err)
+	testMetric3, err := metric.New("test.metric3", map[string]string{"tagit": "valueit"}, map[string]interface{}{"value": 333.}, time.Unix(1530939936, 0))
+	assert.NoError(t, err)
+	testMetrics = []telegraf.Metric{testMetric1, testMetric2, testMetric3}
+	assert.EqualValues(t, parsedMetrics, testMetrics)
+
+}
+
+func TestParseSpecial(t *testing.T) {
+	parser := NewWavefrontParser(nil)
+
+	parsedMetric, err := parser.ParseLine("\"test.metric\" 1 1530939936")
+	assert.NoError(t, err)
+	testMetric, err := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0))
+	assert.NoError(t, err)
+	assert.EqualValues(t, parsedMetric, testMetric)
+
+	parsedMetric, err = parser.ParseLine("test.metric 1 1530939936 tag1=\"val\\\"ue1\"")
+	assert.NoError(t, err)
+	testMetric, err = metric.New("test.metric", map[string]string{"tag1": "val\\\"ue1"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0))
+	assert.NoError(t, err)
+	assert.EqualValues(t, parsedMetric, testMetric)
+
+}
+
+func TestParseInvalid(t *testing.T) {
+	parser := NewWavefrontParser(nil)
+
+	_, err := parser.Parse([]byte("test.metric"))
+	assert.Error(t, err)
+
+	_, err = parser.Parse([]byte("test.metric string"))
+	assert.Error(t, err)
+
+	_, err = parser.Parse([]byte("test.metric 1 string"))
+	assert.Error(t, err)
+
+	_, err = parser.Parse([]byte("test.metric 1 1530939936 tag_no_pair"))
+	assert.Error(t, err)
+
+	_, err = parser.Parse([]byte("test.metric 1 1530939936 tag_broken_value=\""))
+	assert.Error(t, err)
+
+	_, err = parser.Parse([]byte("\"test.metric 1 1530939936"))
+	assert.Error(t, err)
+
+	_, err = parser.Parse([]byte("test.metric 1 1530939936 tag1=val\\\"ue1"))
+	assert.Error(t, err)
+
+}
+
+func TestParseDefaultTags(t *testing.T) {
+	parser := NewWavefrontParser(map[string]string{"myDefault": "value1", "another": "test2"})
+
+	parsedMetrics, err := parser.Parse([]byte("test.metric 1 1530939936"))
+	assert.NoError(t, err)
+	testMetric, err := metric.New("test.metric", map[string]string{"myDefault": "value1", "another": "test2"}, map[string]interface{}{"value": 1.},
		time.Unix(1530939936, 0))
+	assert.NoError(t, err)
+	assert.EqualValues(t, parsedMetrics[0], testMetric)
+
+	parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource"))
+	assert.NoError(t, err)
+	testMetric, err = metric.New("test.metric", map[string]string{"myDefault": "value1", "another": "test2", "source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0))
+	assert.NoError(t, err)
+	assert.EqualValues(t, parsedMetrics[0], testMetric)
+
+	parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 another=\"test3\""))
+	assert.NoError(t, err)
+	testMetric, err = metric.New("test.metric", map[string]string{"myDefault": "value1", "another": "test2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0))
+	assert.NoError(t, err)
+	assert.EqualValues(t, parsedMetrics[0], testMetric)
+
+}
diff --git a/plugins/parsers/wavefront/scanner.go b/plugins/parsers/wavefront/scanner.go
new file mode 100644
index 0000000000000..e64516f541c2a
--- /dev/null
+++ b/plugins/parsers/wavefront/scanner.go
@@ -0,0 +1,69 @@
+package wavefront
+
+import (
+	"bufio"
+	"io"
+)
+
+// PointScanner is a lexical scanner for Wavefront points.
+type PointScanner struct {
+	r *bufio.Reader
+}
+
+func NewScanner(r io.Reader) *PointScanner {
+	return &PointScanner{r: bufio.NewReader(r)}
+}
+
+// read reads the next rune from the buffered reader.
+// It returns eof (rune(0)) if an error occurs or io.EOF is reached.
+func (s *PointScanner) read() rune {
+	ch, _, err := s.r.ReadRune()
+	if err != nil {
+		return eof
+	}
+	return ch
+}
+
+// unread places the previously read rune back on the reader.
+func (s *PointScanner) unread() {
+	_ = s.r.UnreadRune()
+}
+
+// Scan returns the next token and literal value.
+func (s *PointScanner) Scan() (Token, string) {
+
+	// Read the next rune.
+	ch := s.read()
+	if isWhitespace(ch) {
+		return WS, string(ch)
+	} else if isLetter(ch) {
+		return LETTER, string(ch)
+	} else if isNumber(ch) {
+		return NUMBER, string(ch)
+	}
+
+	// Otherwise read the individual character.
+	switch ch {
+	case eof:
+		return EOF, ""
+	case '\n':
+		return NEWLINE, string(ch)
+	case '.':
+		return DOT, string(ch)
+	case '-':
+		return MINUS_SIGN, string(ch)
+	case '_':
+		return UNDERSCORE, string(ch)
+	case '/':
+		return SLASH, string(ch)
+	case '\\':
+		return BACKSLASH, string(ch)
+	case ',':
+		return COMMA, string(ch)
+	case '"':
+		return QUOTES, string(ch)
+	case '=':
+		return EQUALS, string(ch)
+	}
+	return ILLEGAL, string(ch)
+}
diff --git a/plugins/parsers/wavefront/token.go b/plugins/parsers/wavefront/token.go
new file mode 100644
index 0000000000000..bbcbf4e7670bf
--- /dev/null
+++ b/plugins/parsers/wavefront/token.go
@@ -0,0 +1,41 @@
+package wavefront
+
+type Token int
+
+const (
+	// Special tokens
+	ILLEGAL Token = iota
+	EOF
+	WS
+
+	// Literals
+	literal_beg
+	LETTER // metric name, source/point tags
+	NUMBER
+	MINUS_SIGN
+	UNDERSCORE
+	DOT
+	SLASH
+	BACKSLASH
+	COMMA
+	literal_end
+
+	// Misc characters
+	QUOTES
+	EQUALS
+	NEWLINE
+)
+
+func isWhitespace(ch rune) bool {
+	return ch == ' ' || ch == '\t' || ch == '\n'
+}
+
+func isLetter(ch rune) bool {
+	return (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z')
+}
+
+func isNumber(ch rune) bool {
+	return ch >= '0' && ch <= '9'
+}
+
+var eof = rune(0)
diff --git a/plugins/processors/all/all.go b/plugins/processors/all/all.go
index c18015daa2195..5c2e2549e1c63 100644
--- a/plugins/processors/all/all.go
+++ b/plugins/processors/all/all.go
@@ -7,5 +7,6 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/processors/parser"
 	_ "github.com/influxdata/telegraf/plugins/processors/printer"
 	_ "github.com/influxdata/telegraf/plugins/processors/regex"
+	_ "github.com/influxdata/telegraf/plugins/processors/rename"
 	_ "github.com/influxdata/telegraf/plugins/processors/topk"
 )
diff --git a/plugins/processors/parser/README.md b/plugins/processors/parser/README.md
index 4ebb3379b04ed..653d1f2f002e2 100644
--- a/plugins/processors/parser/README.md
+++ b/plugins/processors/parser/README.md
@@ -1,7 +1,6 @@
 # Parser Processor Plugin
 
-This plugin parses defined fields containing the specified data format and
-creates new metrics based on the contents of the field.
+This plugin parses defined fields containing the specified data format and creates new metrics based on the contents of the field.
 
 ## Configuration
 ```toml
 [[processors.parser]]
   parse_fields = ["message"]
@@ -25,6 +24,9 @@ creates new metrics based on the contents of the field.
 
 ### Example:
 
+Extracts the logfmt data from the `message` field and merges it back into the original metric, overriding any conflicting values.
+
+**Configuration**:
 ```toml
 [[processors.parser]]
   parse_fields = ["message"]
@@ -34,8 +36,10 @@ creates new metrics based on the contents of the field.
 **Input**:
 ```
-syslog,appname=influxd,facility=daemon,hostname=http://influxdb.example.org (influxdb.example.org),severity=info version=1i,severity_code=6i,facility_code=3i,timestamp=1533848508138040000i,procid="6629",message=" ts=2018-08-09T21:01:48.137963Z lvl=info msg=\"Executing query\" log_id=09p7QbOG000 service=query query=\"SHOW DATABASES\""
+syslog,appname=influxd,facility=daemon,host=loaner,hostname=http://influxdb.example.org\ (influxdb.example.org),severity=info facility_code=3i,message="ts=2018-08-09T21:01:48.137963Z lvl=info msg=\"Executing query\" log_id=09p7QbOG000 service=query query=\"SHOW DATABASES\"",procid="6629",severity_code=6i,timestamp=1533848508138040000i,version=1i 1534976205000000000
 ```
 
 **Output**:
-syslog,appname=influxd,facility=daemon,hostname=http://influxdb.example.org (influxdb.example.org),severity=info version=1i,severity_code=6i,facility_code=3i,timestamp=1533848508138040000i,procid="6629",ts="2018-08-09T21:01:48.137963Z",lvl=info msg="Executing query",log_id="09p7QbOG000",service="query",query="SHOW DATABASES"
+```
+syslog,appname=influxd,facility=daemon,host=loaner,hostname=http://influxdb.example.org\ (influxdb.example.org),severity=info facility_code=3i,log_id="09p7QbOG000",lvl="info",message="ts=2018-08-09T21:01:48.137963Z lvl=info msg=\"Executing query\" log_id=09p7QbOG000 service=query query=\"SHOW DATABASES\"",msg="Executing query",procid="6629",query="SHOW DATABASES",service="query",severity_code=6i,timestamp=1533848508138040000i,ts="2018-08-09T21:01:48.137963Z",version=1i 1534976303000000000
+```
diff --git a/plugins/processors/rename/README.md b/plugins/processors/rename/README.md
new file mode 100644
index 0000000000000..dbd31490e41c9
--- /dev/null
+++ b/plugins/processors/rename/README.md
@@ -0,0 +1,41 @@
+# Rename Processor Plugin
+
+The `rename` processor renames measurements, fields, and tags.
+
+### Configuration:
+
+```toml
+## Measurement, tag, and field renamings are stored in separate sub-tables.
+## Specify one sub-table per rename operation.
+[[processors.rename]]
+[[processors.rename.measurement]]
+  ## measurement to change
+  from = "network_interface_throughput"
+  to = "throughput"
+
+[[processors.rename.tag]]
+  ## tag to change
+  from = "hostname"
+  to = "host"
+
+[[processors.rename.field]]
+  ## field to change
+  from = "lower"
+  to = "min"
+
+[[processors.rename.field]]
+  ## field to change
+  from = "upper"
+  to = "max"
+```
+
+### Tags:
+
+No tags are applied by this processor, though it can alter them by renaming.
+
+### Example processing:
+
+```diff
+- network_interface_throughput,hostname=backend.example.com,units=kbps lower=10i,upper=1000i,mean=500i 1502489900000000000
++ throughput,host=backend.example.com,units=kbps min=10i,max=1000i,mean=500i 1502489900000000000
+```
diff --git a/plugins/processors/rename/rename.go b/plugins/processors/rename/rename.go
new file mode 100644
index 0000000000000..2da787a353c49
--- /dev/null
+++ b/plugins/processors/rename/rename.go
@@ -0,0 +1,82 @@
+package rename
+
+import (
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/processors"
+)
+
+const sampleConfig = `
+  ## Measurement, tag, and field renamings are stored in separate sub-tables.
+  ## Specify one sub-table per rename operation.
+  # [[processors.rename.measurement]]
+  #   ## measurement to change
+  #   from = "kilobytes_per_second"
+  #   to = "kbps"
+
+  # [[processors.rename.tag]]
+  #   ## tag to change
+  #   from = "host"
+  #   to = "hostname"
+
+  # [[processors.rename.field]]
+  #   ## field to change
+  #   from = "lower"
+  #   to = "min"
+
+  # [[processors.rename.field]]
+  #   ## field to change
+  #   from = "upper"
+  #   to = "max"
+`
+
+type renamer struct {
+	From string
+	To   string
+}
+
+type Rename struct {
+	Measurement []renamer
+	Tag         []renamer
+	Field       []renamer
+}
+
+func (r *Rename) SampleConfig() string {
+	return sampleConfig
+}
+
+func (r *Rename) Description() string {
+	return "Rename measurements, tags, and fields that pass through this filter."
+}
+
+func (r *Rename) Apply(in ...telegraf.Metric) []telegraf.Metric {
+	for _, point := range in {
+		for _, measurementRenamer := range r.Measurement {
+			if point.Name() == measurementRenamer.From {
+				point.SetName(measurementRenamer.To)
+				break
+			}
+		}
+
+		for _, tagRenamer := range r.Tag {
+			if value, ok := point.GetTag(tagRenamer.From); ok {
+				point.RemoveTag(tagRenamer.From)
+				point.AddTag(tagRenamer.To, value)
+			}
+		}
+
+		for _, fieldRenamer := range r.Field {
+			if value, ok := point.GetField(fieldRenamer.From); ok {
+				point.RemoveField(fieldRenamer.From)
+				point.AddField(fieldRenamer.To, value)
+			}
+		}
+	}
+
+	return in
+}
+
+func init() {
+	processors.Add("rename", func() telegraf.Processor {
+		return &Rename{}
+	})
+}
diff --git a/plugins/processors/rename/rename_test.go b/plugins/processors/rename/rename_test.go
new file mode 100644
index 0000000000000..43f7fcc30a502
--- /dev/null
+++ b/plugins/processors/rename/rename_test.go
@@ -0,0 +1,58 @@
+package rename
+
+import (
+	"testing"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/metric"
+	"github.com/stretchr/testify/assert"
+)
+
+func newMetric(name string, tags map[string]string, fields map[string]interface{}) telegraf.Metric {
+	if tags == nil {
+		tags = map[string]string{}
+	}
+	if fields == nil {
+		fields = map[string]interface{}{}
+	}
+	m, _ := metric.New(name, tags, fields, time.Now())
+	return m
+}
+
+func TestMeasurementRename(t *testing.T) {
+	r := Rename{}
+	r.Measurement = []renamer{
+		{From: "foo", To: "bar"},
+		{From: "baz", To: "quux"},
+	}
+	m1 := newMetric("foo", nil, nil)
+	m2 := newMetric("bar", nil, nil)
+	m3 := newMetric("baz", nil, nil)
+	results := r.Apply(m1, m2, m3)
+	assert.Equal(t, "bar", results[0].Name(), "Should change name from 'foo' to 'bar'")
+	assert.Equal(t, "bar", results[1].Name(), "Should not change the name 'bar'")
+	assert.Equal(t, "quux", results[2].Name(), "Should change name from 'baz' to 'quux'")
+}
+
+func TestTagRename(t *testing.T) {
+	r := Rename{}
+	r.Tag = []renamer{
+		{From: "hostname", To: "host"},
+	}
+	m := newMetric("foo", map[string]string{"hostname": "localhost", "region": "east-1"}, nil)
+	results := r.Apply(m)
+
+	assert.Equal(t, map[string]string{"host": "localhost", "region": "east-1"}, results[0].Tags(), "should change tag 'hostname' to 'host'")
+}
+
+func TestFieldRename(t *testing.T) {
+	r := Rename{}
+	r.Field = []renamer{
+		{From: "time_msec", To: "time"},
+	}
+	m := newMetric("foo", nil, map[string]interface{}{"time_msec": int64(1250), "snakes": true})
+	results := r.Apply(m)
+
+	assert.Equal(t, map[string]interface{}{"time": int64(1250), "snakes": true}, results[0].Fields(), "should change field 'time_msec' to 'time'")
+}
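A short in-package sketch of the processor applied to a single point, mirroring the tests above (the example function itself is illustrative, not part of the patch):

```go
package rename

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/metric"
)

// ExampleRename_apply renames one tag and one field on a metric.
func ExampleRename_apply() {
	r := Rename{
		Tag:   []renamer{{From: "hostname", To: "host"}},
		Field: []renamer{{From: "lower", To: "min"}},
	}

	m, _ := metric.New("throughput",
		map[string]string{"hostname": "backend.example.com"},
		map[string]interface{}{"lower": int64(10)},
		time.Unix(0, 0))

	out := r.Apply(m)
	fmt.Println(out[0].Tags()["host"], out[0].Fields()["min"])
	// Output: backend.example.com 10
}
```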
diff --git a/plugins/serializers/influx/influx.go b/plugins/serializers/influx/influx.go
index f052c9c93d1bd..2989e44e9f07c 100644
--- a/plugins/serializers/influx/influx.go
+++ b/plugins/serializers/influx/influx.go
@@ -27,30 +27,34 @@ const (
 	UintSupport FieldTypeSupport = 1 << iota
 )
 
-// MetricError is an error causing a metric to be unserializable.
+var (
+	NeedMoreSpace = "need more space"
+	InvalidName   = "invalid name"
+	NoFields      = "no serializable fields"
+)
+
+// MetricError is an error causing an entire metric to be unserializable.
 type MetricError struct {
-	s string
+	series string
+	reason string
 }
 
 func (e MetricError) Error() string {
-	return e.s
+	if e.series != "" {
+		return fmt.Sprintf("%q: %s", e.series, e.reason)
+	}
+	return e.reason
 }
 
 // FieldError is an error causing a field to be unserializable.
 type FieldError struct {
-	s string
+	reason string
 }
 
 func (e FieldError) Error() string {
-	return e.s
+	return e.reason
 }
 
-var (
-	ErrNeedMoreSpace = &MetricError{"need more space"}
-	ErrInvalidName   = &MetricError{"invalid name"}
-	ErrNoFields      = &MetricError{"no serializable fields"}
-)
-
 // Serializer is a serializer for line protocol.
 type Serializer struct {
 	maxLineBytes int
@@ -102,17 +106,20 @@ func (s *Serializer) Serialize(m telegraf.Metric) ([]byte, error) {
 	return out, nil
 }
 
+// SerializeBatch writes the slice of metrics and returns a byte slice of the
+// results. The returned byte slice may contain multiple lines of data.
 func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
-	var batch bytes.Buffer
+	s.buf.Reset()
 	for _, m := range metrics {
-		_, err := s.Write(&batch, m)
+		_, err := s.Write(&s.buf, m)
 		if err != nil {
 			return nil, err
 		}
 	}
-	return batch.Bytes(), nil
+	out := make([]byte, s.buf.Len())
+	copy(out, s.buf.Bytes())
+	return out, nil
 }
-
 func (s *Serializer) Write(w io.Writer, m telegraf.Metric) (int, error) {
 	err := s.writeMetric(w, m)
 	return s.bytesWritten, err
@@ -135,7 +142,7 @@ func (s *Serializer) buildHeader(m telegraf.Metric) error {
 	name := nameEscape(m.Name())
 	if name == "" {
-		return ErrInvalidName
+		return s.newMetricError(InvalidName)
 	}
 
 	s.header = append(s.header, name...)
@@ -222,9 +229,10 @@ func (s *Serializer) writeMetric(w io.Writer, m telegraf.Metric) error {
 		}
 
 		if s.maxLineBytes > 0 && bytesNeeded > s.maxLineBytes {
-			// Need at least one field per line
+			// Need at least one field per line; this metric cannot fit
+			// into the max line bytes.
 			if firstField {
-				return ErrNeedMoreSpace
+				return s.newMetricError(NeedMoreSpace)
 			}
 
 			err = s.write(w, s.footer)
@@ -232,21 +240,12 @@ func (s *Serializer) writeMetric(w io.Writer, m telegraf.Metric) error {
 				return err
 			}
 
+			firstField = true
 			bytesNeeded = len(s.header) + len(s.pair) + len(s.footer)
 
-			if s.maxLineBytes > 0 && bytesNeeded > s.maxLineBytes {
-				return ErrNeedMoreSpace
+			if bytesNeeded > s.maxLineBytes {
+				return s.newMetricError(NeedMoreSpace)
 			}
-
-			err = s.write(w, s.header)
-			if err != nil {
-				return err
-			}
-
-			s.write(w, s.pair)
-			pairsLen += len(s.pair)
-			firstField = false
-			continue
 		}
 
 		if firstField {
@@ -261,18 +260,28 @@ func (s *Serializer) writeMetric(w io.Writer, m telegraf.Metric) error {
 			}
 		}
 
-		s.write(w, s.pair)
+		err = s.write(w, s.pair)
+		if err != nil {
+			return err
+		}
+
 		pairsLen += len(s.pair)
 		firstField = false
 	}
 
 	if firstField {
-		return ErrNoFields
+		return s.newMetricError(NoFields)
 	}
 
 	return s.write(w, s.footer)
+}
 
+func (s *Serializer) newMetricError(reason string) *MetricError {
+	if len(s.header) != 0 {
+		series := bytes.TrimRight(s.header, " ")
+		return &MetricError{series: string(series), reason: reason}
+	}
+	return &MetricError{reason: reason}
 }
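With the reworked `MetricError`, messages now carry the offending metric's series. An illustrative in-package example of the resulting format (runnable only inside `package influx`, since the fields are unexported):

```go
package influx

import "fmt"

// ExampleMetricError is an illustrative sketch, not part of the patch:
// with a series set, Error() quotes the series and appends the reason.
func ExampleMetricError() {
	err := MetricError{series: "cpu,host=localhost", reason: NeedMoreSpace}
	fmt.Println(err.Error())
	// Output: "cpu,host=localhost": need more space
}
```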
diff --git a/plugins/serializers/influx/influx_test.go b/plugins/serializers/influx/influx_test.go
index 74bffe5e492cb..2c1cbd58770c2 100644
--- a/plugins/serializers/influx/influx_test.go
+++ b/plugins/serializers/influx/influx_test.go
@@ -23,7 +23,7 @@ var tests = []struct {
 	typeSupport FieldTypeSupport
 	input       telegraf.Metric
 	output      []byte
-	err         error
+	errReason   string
 }{
 	{
 		name: "minimal",
@@ -98,7 +98,7 @@ var tests = []struct {
 				time.Unix(0, 0),
 			),
 		),
-		err: ErrNoFields,
+		errReason: NoFields,
 	},
 	{
 		name: "float Inf",
@@ -333,8 +333,8 @@ var tests = []struct {
 				time.Unix(1519194109, 42),
 			),
 		),
-		output: nil,
-		err:    ErrNeedMoreSpace,
+		output:    nil,
+		errReason: NeedMoreSpace,
 	},
 	{
 		name: "no fields",
@@ -346,7 +346,7 @@ var tests = []struct {
 				time.Unix(0, 0),
 			),
 		),
-		err: ErrNoFields,
+		errReason: NoFields,
 	},
 	{
 		name: "procstat",
@@ -427,7 +427,10 @@ func TestSerializer(t *testing.T) {
 			serializer.SetFieldSortOrder(SortFields)
 			serializer.SetFieldTypeSupport(tt.typeSupport)
 			output, err := serializer.Serialize(tt.input)
-			require.Equal(t, tt.err, err)
+			if tt.errReason != "" {
+				require.Error(t, err)
+				require.Contains(t, err.Error(), tt.errReason)
+			}
 			require.Equal(t, string(tt.output), string(output))
 		})
 	}
diff --git a/plugins/serializers/influx/reader.go b/plugins/serializers/influx/reader.go
index 4a755c88ddd81..d0dad8eebb984 100644
--- a/plugins/serializers/influx/reader.go
+++ b/plugins/serializers/influx/reader.go
@@ -2,7 +2,6 @@ package influx
 
 import (
 	"bytes"
-	"fmt"
 	"io"
 	"log"
 
@@ -54,17 +53,11 @@ func (r *reader) Read(p []byte) (int, error) {
 		r.offset += 1
 		if err != nil {
 			r.buf.Reset()
-			switch err.(type) {
-			case *MetricError:
-				// Since we are serializing an array of metrics, don't fail
+			if err != nil {
+				// Since we are serializing multiple metrics, don't fail
 				// the entire batch just because of one unserializable metric.
-				log.Printf(
-					"D! [serializers.influx] could not serialize metric %q: %v; discarding metric",
-					metric.Name(), err)
+				log.Printf("E! [serializers.influx] could not serialize metric: %v; discarding metric", err)
 				continue
-			default:
-				fmt.Println(err)
-				return 0, err
 			}
 		}
 		break
diff --git a/scripts/ci-1.10.docker b/scripts/ci-1.10.docker
index 1cfe4c27d82b9..33075adfc4456 100644
--- a/scripts/ci-1.10.docker
+++ b/scripts/ci-1.10.docker
@@ -24,5 +24,5 @@ RUN gem install fpm
 
 RUN go get -d github.com/golang/dep && \
     cd src/github.com/golang/dep && \
-    git checkout -q v0.4.1 && \
-    go install -ldflags="-X main.version=v0.4.1" ./cmd/dep
+    git checkout -q v0.5.0 && \
+    go install -ldflags="-X main.version=v0.5.0" ./cmd/dep
diff --git a/scripts/ci-1.9.docker b/scripts/ci-1.9.docker
index d1ac5f839c698..0a931c8179905 100644
--- a/scripts/ci-1.9.docker
+++ b/scripts/ci-1.9.docker
@@ -24,5 +24,5 @@ RUN gem install fpm
 
 RUN go get -d github.com/golang/dep && \
    cd src/github.com/golang/dep && \
-    git checkout -q v0.4.1 && \
-    go install -ldflags="-X main.version=v0.4.1" ./cmd/dep
+    git checkout -q v0.5.0 && \
+    go install -ldflags="-X main.version=v0.5.0" ./cmd/dep