diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 8bbb2c71e33..fde2c427394 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -122,6 +122,8 @@ https://github.com/elastic/beats/compare/v8.2.0\...main[Check the HEAD diff] *Affecting all Beats* +- Beats will now attempt to recover if a lockfile has not been removed {pull}33169[33169] + *Auditbeat* diff --git a/NOTICE.txt b/NOTICE.txt index 91c74780814..e0a952bc588 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -10100,11 +10100,11 @@ SOFTWARE -------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-libs -Version: v0.2.11 +Version: v0.2.13 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.2.11/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.2.13/LICENSE: Apache License Version 2.0, January 2004 @@ -10414,11 +10414,11 @@ these terms. 
-------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-system-metrics -Version: v0.4.4 +Version: v0.4.5-0.20220927192933-25a985b07d51 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-system-metrics@v0.4.4/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-system-metrics@v0.4.5-0.20220927192933-25a985b07d51/LICENSE.txt: Apache License Version 2.0, January 2004 @@ -17199,11 +17199,11 @@ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLI -------------------------------------------------------------------------------- Dependency : github.com/magefile/mage -Version: v1.13.0 +Version: v1.14.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/magefile/mage@v1.13.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/magefile/mage@v1.14.0/LICENSE: Apache License Version 2.0, January 2004 @@ -19373,11 +19373,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- Dependency : github.com/stretchr/testify -Version: v1.7.1 +Version: v1.8.0 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/stretchr/testify@v1.7.1/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/stretchr/testify@v1.8.0/LICENSE: MIT License @@ -21378,11 +21378,11 @@ Contents of probable licence file $GOMODCACHE/go.mongodb.org/mongo-driver@v1.5.1 -------------------------------------------------------------------------------- Dependency : go.uber.org/atomic -Version: v1.9.0 +Version: v1.10.0 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.uber.org/atomic@v1.9.0/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/go.uber.org/atomic@v1.10.0/LICENSE.txt: Copyright (c) 2016 Uber Technologies, Inc. @@ -21436,11 +21436,11 @@ THE SOFTWARE. -------------------------------------------------------------------------------- Dependency : go.uber.org/zap -Version: v1.21.0 +Version: v1.23.0 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.uber.org/zap@v1.21.0/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/go.uber.org/zap@v1.23.0/LICENSE.txt: Copyright (c) 2016-2017 Uber Technologies, Inc. @@ -42826,6 +42826,76 @@ DEALINGS IN THE SOFTWARE. 
+-------------------------------------------------------------------------------- +Dependency : github.com/shirou/gopsutil +Version: v3.21.11+incompatible +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/shirou/gopsutil@v3.21.11+incompatible/LICENSE: + +gopsutil is distributed under BSD license reproduced below. + +Copyright (c) 2014, WAKAYAMA Shirou +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of the gopsutil authors nor the names of its contributors + may be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +------- +internal/common/binary.go in the gopsutil is copied and modifid from golang/encoding/binary.go. + + + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + -------------------------------------------------------------------------------- Dependency : github.com/sirupsen/logrus Version: v1.8.1 @@ -42959,11 +43029,11 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- Dependency : github.com/stretchr/objx -Version: v0.2.0 +Version: v0.4.0 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/stretchr/objx@v0.2.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/stretchr/objx@v0.4.0/LICENSE: The MIT License diff --git a/filebeat/docs/filebeat-log-rotation.asciidoc b/filebeat/docs/filebeat-log-rotation.asciidoc index 1399f605c6c..69eafd22e7e 100644 --- a/filebeat/docs/filebeat-log-rotation.asciidoc +++ b/filebeat/docs/filebeat-log-rotation.asciidoc @@ -70,8 +70,8 @@ sure it does not miss any events. [source,yaml] ----------------------------------------------------- filebeat.inputs: -- type: log - enabled: false +- type: filestream + id: my-server-filestream-id paths: - /var/log/my-server/my-server.log* ----------------------------------------------------- diff --git a/go.mod b/go.mod index 9037142a481..46e0188e28f 100644 --- a/go.mod +++ b/go.mod @@ -117,7 +117,7 @@ require ( github.com/jonboulle/clockwork v0.2.2 github.com/josephspurrier/goversioninfo v0.0.0-20190209210621-63e6d1acd3dd github.com/lib/pq v1.10.3 - github.com/magefile/mage v1.13.0 + github.com/magefile/mage v1.14.0 github.com/mattn/go-colorable v0.1.12 github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe // indirect github.com/miekg/dns v1.1.42 @@ -141,7 +141,7 @@ require ( github.com/shopspring/decimal v1.2.0 github.com/spf13/cobra v1.3.0 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.7.1 + github.com/stretchr/testify v1.8.0 github.com/tsg/go-daemon v0.0.0-20200207173439-e704b93fd89b github.com/ugorji/go/codec v1.1.8 github.com/urso/sderr v0.0.0-20210525210834-52b04e8f5c71 @@ -150,9 +150,9 @@ require ( go.elastic.co/ecszap v1.0.1 go.elastic.co/go-licence-detector v0.5.0 go.etcd.io/bbolt v1.3.6 - go.uber.org/atomic v1.9.0 + 
go.uber.org/atomic v1.10.0 go.uber.org/multierr v1.8.0 - go.uber.org/zap v1.21.0 + go.uber.org/zap v1.23.0 golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 golang.org/x/mod v0.5.1 @@ -192,9 +192,9 @@ require ( github.com/awslabs/kinesis-aggregation/go/v2 v2.0.0-20220623125934-28468a6701b5 github.com/elastic/bayeux v1.0.5 github.com/elastic/elastic-agent-autodiscover v0.4.0 - github.com/elastic/elastic-agent-libs v0.2.11 + github.com/elastic/elastic-agent-libs v0.2.13 github.com/elastic/elastic-agent-shipper-client v0.4.0 - github.com/elastic/elastic-agent-system-metrics v0.4.4 + github.com/elastic/elastic-agent-system-metrics v0.4.5-0.20220927192933-25a985b07d51 github.com/elastic/go-elasticsearch/v8 v8.2.0 github.com/googleapis/gax-go/v2 v2.5.1 github.com/pierrec/lz4/v4 v4.1.15 @@ -308,8 +308,9 @@ require ( github.com/sanathkr/go-yaml v0.0.0-20170819195128-ed9d249f429b // indirect github.com/santhosh-tekuri/jsonschema v1.2.4 // indirect github.com/sergi/go-diff v1.1.0 // indirect + github.com/shirou/gopsutil v3.21.11+incompatible // indirect github.com/sirupsen/logrus v1.8.1 // indirect - github.com/stretchr/objx v0.2.0 // indirect + github.com/stretchr/objx v0.4.0 // indirect github.com/tklauser/go-sysconf v0.3.9 // indirect github.com/tklauser/numcpus v0.3.0 // indirect github.com/urso/diag v0.0.0-20200210123136-21b3cc8eb797 // indirect diff --git a/go.sum b/go.sum index 9048f0018fa..cc02b7355cb 100644 --- a/go.sum +++ b/go.sum @@ -617,12 +617,13 @@ github.com/elastic/elastic-agent-autodiscover v0.4.0 h1:R1JMLHQpH2KP3GXY8zmgV4dj github.com/elastic/elastic-agent-autodiscover v0.4.0/go.mod h1:p3MSf9813JEnolCTD0GyVAr3+Eptg2zQ9aZVFjl4tJ4= github.com/elastic/elastic-agent-client/v7 v7.0.0-20220804181728-b0328d2fe484 h1:uJIMfLgCenJvxsVmEjBjYGxt0JddCgw2IxgoNfcIXOk= github.com/elastic/elastic-agent-client/v7 v7.0.0-20220804181728-b0328d2fe484/go.mod h1:fkvyUfFwyAG5OnMF0h+FV9sC0Xn9YLITwQpSuwungQs= 
-github.com/elastic/elastic-agent-libs v0.2.11 h1:ZeYn35Kxt+IdtMPmE01TaDeaahCg/z7MkGPVWUo6Lp4= github.com/elastic/elastic-agent-libs v0.2.11/go.mod h1:chO3rtcLyGlKi9S0iGVZhYCzDfdDsAQYBc+ui588AFE= +github.com/elastic/elastic-agent-libs v0.2.13 h1:YQzhO8RaLosGlyt7IHtj/ZxigWiwLcXXlv3gS4QY9CA= +github.com/elastic/elastic-agent-libs v0.2.13/go.mod h1:0J9lzJh+BjttIiVjYDLncKYCEWUUHiiqnuI64y6C6ss= github.com/elastic/elastic-agent-shipper-client v0.4.0 h1:nsTJF9oo4RHLl+zxFUZqNHaE86C6Ba5aImfegcEf6Sk= github.com/elastic/elastic-agent-shipper-client v0.4.0/go.mod h1:OyI2W+Mv3JxlkEF3OeT7K0dbuxvwew8ke2Cf4HpLa9Q= -github.com/elastic/elastic-agent-system-metrics v0.4.4 h1:Br3S+TlBhijrLysOvbHscFhgQ00X/trDT5VEnOau0E0= -github.com/elastic/elastic-agent-system-metrics v0.4.4/go.mod h1:tF/f9Off38nfzTZHIVQ++FkXrDm9keFhFpJ+3pQ00iI= +github.com/elastic/elastic-agent-system-metrics v0.4.5-0.20220927192933-25a985b07d51 h1:ZFk7hC6eRPJkJNtOSG+GYbRlsgLjSD8rTj4gQq+7rsA= +github.com/elastic/elastic-agent-system-metrics v0.4.5-0.20220927192933-25a985b07d51/go.mod h1:vTqfhtj83LlPKbusEwrEywZv13nhPExwINB3PkeRQeo= github.com/elastic/elastic-transport-go/v8 v8.1.0 h1:NeqEz1ty4RQz+TVbUrpSU7pZ48XkzGWQj02k5koahIE= github.com/elastic/elastic-transport-go/v8 v8.1.0/go.mod h1:87Tcz8IVNe6rVSLdBux1o/PEItLtyabHU3naC7IoqKI= github.com/elastic/fsevents v0.0.0-20181029231046-e1d381a4d270 h1:cWPqxlPtir4RoQVCpGSRXmLqjEHpJKbR60rxh1nQZY4= @@ -1281,8 +1282,8 @@ github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc8 github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magefile/mage v1.9.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magefile/mage v1.12.1/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= -github.com/magefile/mage v1.13.0 h1:XtLJl8bcCM7EFoO8FyH8XK3t7G5hQAeK+i4tq+veT9M= -github.com/magefile/mage v1.13.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= +github.com/magefile/mage v1.14.0 
h1:6QDX3g6z1YvJ4olPhT1wksUcSa/V0a1B+pJb73fBjyo= +github.com/magefile/mage v1.14.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -1632,6 +1633,8 @@ github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfP github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= +github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil/v3 v3.21.12 h1:VoGxEW2hpmz0Vt3wUvHIl9fquzYLNpVpgNNB7pGJimA= github.com/shirou/gopsutil/v3 v3.21.12/go.mod h1:BToYZVTlSVlfazpDDYFnsVZLaoRG+g8ufT6fPQLdJzA= github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= @@ -1692,8 +1695,9 @@ github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5J github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= 
github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -1703,8 +1707,9 @@ github.com/stretchr/testify v1.5.0/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -1849,8 +1854,9 @@ go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.8.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod 
h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= @@ -1871,8 +1877,9 @@ go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= +go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= +go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180505025534-4ec37c66abab/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= diff --git a/heartbeat/ecserr/ecserr.go b/heartbeat/ecserr/ecserr.go index 34cee65a96f..9ca594fc0b8 100644 --- a/heartbeat/ecserr/ecserr.go +++ b/heartbeat/ecserr/ecserr.go @@ -120,3 +120,11 @@ func NewCouldNotConnectErr(host, port string, err error) *ECSErr { fmt.Sprintf("Could not connect to '%s:%s' with error: %s", host, port, err), ) } + +func NewNotSyntheticsCapableError() *ECSErr { + return NewECSErr( + TYPE_IO, + "AGENT_NOT_BROWSER_CAPABLE", + "browser monitors cannot be created outside the official elastic docker image", + ) +} diff --git a/heartbeat/monitors/monitor.go b/heartbeat/monitors/monitor.go index fb759f074d6..17271086d16 100644 --- a/heartbeat/monitors/monitor.go +++ b/heartbeat/monitors/monitor.go @@ 
-154,15 +154,19 @@ func newMonitorUnsafe( return p.Close() } - // If we've hit an error at this point, still run on schedule, but always return an error. - // This way the error is clearly communicated through to kibana. - // Since the error is not recoverable in these instances, the user will need to reconfigure - // the monitor, which will destroy and recreate it in heartbeat, thus clearing this error. - // - // Note: we do this at this point, and no earlier, because at a minimum we need the - // standard monitor fields (id, name and schedule) to deliver an error to kibana in a way - // that it can render. - if err != nil { + var wrappedJobs []jobs.Job + if err == nil { + wrappedJobs = wrappers.WrapCommon(p.Jobs, m.stdFields, stateLoader) + } else { + // If we've hit an error at this point, still run on schedule, but always return an error. + // This way the error is clearly communicated through to kibana. + // Since the error is not recoverable in these instances, the user will need to reconfigure + // the monitor, which will destroy and recreate it in heartbeat, thus clearing this error. + // + // Note: we do this at this point, and no earlier, because at a minimum we need the + // standard monitor fields (id, name and schedule) to deliver an error to kibana in a way + // that it can render. 
+ // Note, needed to hoist err to this scope, not just to add a prefix fullErr := fmt.Errorf("job could not be initialized: %w", err) // A placeholder job that always returns an error @@ -171,9 +175,13 @@ func newMonitorUnsafe( p.Jobs = []jobs.Job{func(event *beat.Event) ([]jobs.Job, error) { return nil, fullErr }} + + // We need to use the lightweight wrapping for error jobs + // since browser wrapping won't write summaries, but the fake job here is + // effectively a lightweight job + wrappedJobs = wrappers.WrapLightweight(p.Jobs, m.stdFields, monitorstate.NewTracker(stateLoader, false)) } - wrappedJobs := wrappers.WrapCommon(p.Jobs, m.stdFields, stateLoader) m.endpoints = p.Endpoints m.configuredJobs, err = m.makeTasks(config, wrappedJobs) diff --git a/heartbeat/monitors/wrappers/wrappers.go b/heartbeat/monitors/wrappers/wrappers.go index bdc5935ae3b..7bbbe51103c 100644 --- a/heartbeat/monitors/wrappers/wrappers.go +++ b/heartbeat/monitors/wrappers/wrappers.go @@ -60,6 +60,7 @@ func WrapLightweight(js []jobs.Job, stdMonFields stdfields.StdMonitorFields, mst addServiceName(stdMonFields), addMonitorMeta(stdMonFields, len(js) > 1), addMonitorStatus(false), + addMonitorErr, addMonitorDuration, ), func() jobs.JobWrapper { @@ -82,6 +83,7 @@ func WrapBrowser(js []jobs.Job, stdMonFields stdfields.StdMonitorFields, mst *mo addServiceName(stdMonFields), addMonitorMeta(stdMonFields, false), addMonitorStatus(true), + addMonitorErr, addMonitorState(stdMonFields, mst), logJourneySummaries, ) @@ -227,28 +229,39 @@ func addMonitorStatus(summaryOnly bool) jobs.JobWrapper { if summaryOnly { hasSummary, _ := event.Fields.HasKey("summary.up") if !hasSummary { - return cont, nil + return cont, err } } - fields := mapstr.M{ + eventext.MergeEventFields(event, mapstr.M{ "monitor": mapstr.M{ "status": look.Status(err), }, + }) + + return cont, err + } + } +} + +func addMonitorErr(origJob jobs.Job) jobs.Job { + return func(event *beat.Event) ([]jobs.Job, error) { + cont, err := 
origJob(event) + + if err != nil { + var errVal interface{} + var asECS *ecserr.ECSErr + if errors.As(err, &asECS) { + // Override the message of the error in the event it was wrapped + asECS.Message = err.Error() + errVal = asECS + } else { + errVal = look.Reason(err) } - if err != nil { - var asECS *ecserr.ECSErr - if errors.As(err, &asECS) { - // Override the message of the error in the event it was wrapped - asECS.Message = err.Error() - fields["error"] = asECS - } else { - fields["error"] = look.Reason(err) - } - } - eventext.MergeEventFields(event, fields) - return cont, nil + eventext.MergeEventFields(event, mapstr.M{"error": errVal}) } + + return cont, nil } } diff --git a/heartbeat/monitors/wrappers/wrappers_test.go b/heartbeat/monitors/wrappers/wrappers_test.go index 2528514d067..fce80d87e90 100644 --- a/heartbeat/monitors/wrappers/wrappers_test.go +++ b/heartbeat/monitors/wrappers/wrappers_test.go @@ -719,17 +719,27 @@ func TestProjectBrowserJob(t *testing.T) { } func TestECSErrors(t *testing.T) { + // key is test name, value is whether to test a summary event or not + testCases := map[string]bool{ + "on summary event": true, + "on non-summary event": false, + } + ecse := ecserr.NewBadCmdStatusErr(123, "mycommand") - wrappedEcsErr := fmt.Errorf("wrapped: %w", ecse) - expectedEcsErr := ecserr.NewECSErr( + wrappedECSErr := fmt.Errorf("wrapped: %w", ecse) + expectedECSErr := ecserr.NewECSErr( ecse.Type, ecse.Code, - wrappedEcsErr.Error(), + wrappedECSErr.Error(), ) - j := WrapCommon([]jobs.Job{makeProjectBrowserJob(t, "http://example.net", true, wrappedEcsErr, projectMonitorValues)}, testBrowserMonFields, nil) - event := &beat.Event{} - _, err := j[0](event) - require.NoError(t, err) - require.Equal(t, event.Fields["error"], expectedEcsErr) + for name, makeSummaryEvent := range testCases { + t.Run(name, func(t *testing.T) { + j := WrapCommon([]jobs.Job{makeProjectBrowserJob(t, "http://example.net", makeSummaryEvent, wrappedECSErr, projectMonitorValues)}, 
testBrowserMonFields, nil) + event := &beat.Event{} + _, err := j[0](event) + require.NoError(t, err) + require.Equal(t, expectedECSErr, event.Fields["error"]) + }) + } } diff --git a/libbeat/cmd/instance/beat.go b/libbeat/cmd/instance/beat.go index 4e9af610053..28a7f0489f2 100644 --- a/libbeat/cmd/instance/beat.go +++ b/libbeat/cmd/instance/beat.go @@ -44,6 +44,7 @@ import ( "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/cloudid" + "github.com/elastic/beats/v7/libbeat/cmd/instance/locks" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/reload" "github.com/elastic/beats/v7/libbeat/common/seccomp" @@ -385,13 +386,13 @@ func (b *Beat) launch(settings Settings, bt beat.Creator) error { // Try to acquire exclusive lock on data path to prevent another beat instance // sharing same data path. - bl := newLocker(b) - err := bl.lock() + bl := locks.New(b.Info) + err := bl.Lock() if err != nil { return err } defer func() { - _ = bl.unlock() + _ = bl.Unlock() }() svc.BeforeRun() diff --git a/libbeat/cmd/instance/locker.go b/libbeat/cmd/instance/locker.go deleted file mode 100644 index 0d7a1ff2486..00000000000 --- a/libbeat/cmd/instance/locker.go +++ /dev/null @@ -1,76 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. 
See the License for the -// specific language governing permissions and limitations -// under the License. - -package instance - -import ( - "os" - - "github.com/gofrs/flock" - "github.com/pkg/errors" - - "github.com/elastic/elastic-agent-libs/paths" -) - -var ( - // ErrAlreadyLocked is returned when a lock on the data path is attempted but - // unsuccessful because another Beat instance already has the lock on the same - // data path. - ErrAlreadyLocked = errors.New("data path already locked by another beat. Please make sure that multiple beats are not sharing the same data path (path.data).") -) - -type locker struct { - fl *flock.Flock -} - -func newLocker(b *Beat) *locker { - lockfilePath := paths.Resolve(paths.Data, b.Info.Beat+".lock") - return &locker{ - fl: flock.NewFlock(lockfilePath), - } -} - -// lock attempts to acquire a lock on the data path for the currently-running -// Beat instance. If another Beats instance already has a lock on the same data path -// an ErrAlreadyLocked error is returned. -func (l *locker) lock() error { - isLocked, err := l.fl.TryLock() - if err != nil { - return errors.Wrap(err, "unable to lock data path") - } - - if !isLocked { - return ErrAlreadyLocked - } - - return nil -} - -// unlock attempts to release the lock on a data path previously acquired via Lock(). -func (l *locker) unlock() error { - err := l.fl.Unlock() - if err != nil { - return errors.Wrap(err, "unable to unlock data path") - } - - err = os.Remove(l.fl.Path()) - if err != nil { - return errors.Wrap(err, "unable to unlock data path") - } - - return nil -} diff --git a/libbeat/cmd/instance/locker_test.go b/libbeat/cmd/instance/locker_test.go deleted file mode 100644 index b2b2d2854d1..00000000000 --- a/libbeat/cmd/instance/locker_test.go +++ /dev/null @@ -1,68 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. 
See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -//go:build !integration -// +build !integration - -package instance - -import ( - "io/ioutil" - "os" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/elastic-agent-libs/paths" -) - -// TestLocker tests that two beats pointing to the same data path cannot -// acquire the same lock. -func TestLocker(t *testing.T) { - // Setup temporary data folder for test + clean it up at end of test - tmpDataDir, err := ioutil.TempDir("", "data") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDataDir) - - origDataPath := paths.Paths.Data - defer func() { - paths.Paths.Data = origDataPath - }() - paths.Paths.Data = tmpDataDir - - // Setup two beats with same name and data path - const beatName = "testbeat" - - b1 := &Beat{} - b1.Info.Beat = beatName - - b2 := &Beat{} - b2.Info.Beat = beatName - - // Try to get a lock for the first beat. Expect it to succeed. - bl1 := newLocker(b1) - err = bl1.lock() - assert.NoError(t, err) - - // Try to get a lock for the second beat. Expect it to fail because the - // first beat already has the lock. 
- bl2 := newLocker(b2) - err = bl2.lock() - assert.EqualError(t, err, ErrAlreadyLocked.Error()) -} diff --git a/libbeat/cmd/instance/locks/lock.go b/libbeat/cmd/instance/locks/lock.go new file mode 100644 index 00000000000..0da72726182 --- /dev/null +++ b/libbeat/cmd/instance/locks/lock.go @@ -0,0 +1,262 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package locks + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "runtime" + "time" + + "github.com/gofrs/flock" + + "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/elastic-agent-libs/paths" + metricproc "github.com/elastic/elastic-agent-system-metrics/metric/system/process" +) + +type Locker struct { + fileLock *flock.Flock + logger *logp.Logger + beatName string + filePath string + beatStart time.Time +} + +type pidfile struct { + PID int `json:"pid"` + WriteTime time.Time `json:"write_time"` +} + +var ( + // ErrAlreadyLocked is returned when a lock on the data path is attempted but + // unsuccessful because another Beat instance already has the lock on the same + // data path. + ErrAlreadyLocked = fmt.Errorf("data path already locked by another beat. 
Please make sure that multiple beats are not sharing the same data path (path.data).") + + // ErrLockfileEmpty is returned by readExistingPidfile() when an existing pidfile is found, but the file is empty. + ErrLockfileEmpty = fmt.Errorf("lockfile is empty") +) + +// a little wrapper for the getpid function to make testing easier. +var pidFetch = os.Getpid + +// New returns a new pid-aware file locker +// all logic, including checking for existing locks, is performed lazily +func New(beatInfo beat.Info) *Locker { + lockfilePath := paths.Resolve(paths.Data, beatInfo.Beat+".lock") + return &Locker{ + fileLock: flock.New(lockfilePath), + logger: logp.L(), + beatName: beatInfo.Beat, + filePath: lockfilePath, + beatStart: beatInfo.StartTime, + } +} + +// Lock attempts to acquire a lock on the data path for the currently-running +// Beat instance. If another Beats instance already has a lock on the same data path +// an ErrAlreadyLocked error is returned. +func (lock *Locker) Lock() error { + new := pidfile{PID: pidFetch(), WriteTime: time.Now()} + encoded, err := json.Marshal(&new) + if err != nil { + return fmt.Errorf("error encoding json for pidfile: %w", err) + } + + // The combination of O_CREATE and O_EXCL will ensure we return an error if we don't + // manage to create the file + fh, openErr := os.OpenFile(lock.filePath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o600) + // Don't trust different OSes to report the errors we expect, just try to recover regardless + if openErr != nil { + err = lock.handleFailedCreate() + if err != nil { + return fmt.Errorf("cannot obtain lockfile: %w", err) + } + // If something fails here, it's probably unrecoverable + fh, err = os.OpenFile(lock.filePath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o600) + if err != nil { + return fmt.Errorf("cannot re-obtain lockfile %s: %w", lock.filePath, err) + } + } + + // a Process can't write to its own locked file on all platforms, write first + _, err = fh.Write(encoded) + if err != nil { + return 
fmt.Errorf("error writing pidfile to %s: %w", lock.filePath, err) + } + + // Exclusive lock + isLocked, err := lock.fileLock.TryLock() + if err != nil { + return fmt.Errorf("unable to lock data path: %w", err) + } + // case: lock could not be obtained. + if !isLocked { + // if we're here, things are probably unrecoverable, as we've previously checked for a lockfile. Exit. + return ErrAlreadyLocked + } + + return nil +} + +// Unlock attempts to release the lock on a data path previously acquired via Lock(). +func (lock *Locker) Unlock() error { + err := lock.fileLock.Unlock() + if err != nil { + return fmt.Errorf("unable to unlock data path: %w", err) + } + + err = os.Remove(lock.fileLock.Path()) + if err != nil { + return fmt.Errorf("unable to unlock data path file %s: %w", lock.fileLock.Path(), err) + } + return nil +} + +// ******* private helpers + +// handleFailedCreate will attempt to recover from a failed lock operation in a pid-aware way. +// The point of this is to deal with instances where an improper beat shutdown left us with +// a pre-existing pidfile for a beat process that no longer exists. +func (lock *Locker) handleFailedCreate() error { + // First, try to lock the file as a check to see what state we're in. + // If there's a pre-existing lock that's in effect, we probably can't recover + // Not all OSes will fail on this. + _, err := lock.fileLock.TryLock() + // Case: the file already locked, and in use by another process. + if err != nil { + if runtime.GOOS == "windows" { + // on windows, locks from dead PIDs will be auto-released, but it might take the OS a while. + // However, the time it takes for the operating system to unlock these locks depends upon available system resources. 
+ time.Sleep(time.Second) + _, err := lock.fileLock.TryLock() + if err != nil { + return fmt.Errorf("The lockfile %s is locked after a retry, another beat is probably running", lock.fileLock) + } + } else { + return fmt.Errorf("The lockfile %s is already locked by another beat", lock.fileLock) + } + } + + // if we're here, we've locked the file + // unlock so we can continue + err = lock.fileLock.Unlock() + if err != nil { + return fmt.Errorf("error unlocking a previously found file %s after a temporary lock", lock.filePath) + } + + // read in whatever existing lockfile caused us to fail + pf, err := lock.readExistingPidfile() + // Case: two beats start up simultaneously, there's a chance we could "see" the pidfile before the other process writes to it + // or, the other beat died before it could write the pidfile. + // Sleep, read again. If we still don't have anything, assume the other PID is dead, continue. + if errors.Is(err, ErrLockfileEmpty) { + lock.logger.Debugf("Found other pidfile, but no data. Retrying.") + time.Sleep(time.Millisecond * 500) + pf, err = lock.readExistingPidfile() + if errors.Is(err, ErrLockfileEmpty) { + lock.logger.Debugf("No PID found in other lockfile, continuing") + return lock.recoverLockfile() + } else if err != nil { + return fmt.Errorf("error re-reading existing lockfile: %w", err) + } + } else if err != nil { + return fmt.Errorf("error reading existing lockfile: %w", err) + } + + // Case: the lockfile is locked, but by us. 
Probably a coding error, + // and probably hard to do + if pf.PID == os.Getpid() { + // the lockfile was written before the beat started, meaning we restarted and somehow got the same pid + // in which case, continue + if lock.beatStart.Before(pf.WriteTime) { + return fmt.Errorf("lockfile for beat has been locked twice by the same PID, potential bug.") + } + lock.logger.Debugf("Beat has started with the same PID, continuing") + return lock.recoverLockfile() + } + + // Check to see if the PID found in the pidfile exists. + existsState, err := findMatchingPID(pf.PID) + // Case: we have a lockfile, but the pid from the pidfile no longer exists + // this was presumably due to the dirty shutdown. + // Try to reset the lockfile and continue. + if errors.Is(err, metricproc.ProcNotExist) { + lock.logger.Debugf("%s shut down without removing previous lockfile, continuing", lock.beatName) + return lock.recoverLockfile() + } else if err != nil { + return fmt.Errorf("error looking up status for pid %d: %w", pf.PID, err) + } else { + // Case: the PID exists, but it's attached to a zombie process + // In this case...we should be okay to restart? + if existsState == metricproc.Zombie { + lock.logger.Debugf("%s shut down without removing previous lockfile and is currently in a zombie state, continuing", lock.beatName) + return lock.recoverLockfile() + } + // Case: we've gotten a lock file for another process that's already running + // This is the "base" lockfile case, which is two beats running from the same directory + // This is where we'll catch this particular case on Linux, due to Linux's advisory-style locks. + return fmt.Errorf("cannot start, data directory belongs to process with pid %d", pf.PID) + } +} + +// recoverLockfile attempts to remove the lockfile and continue running +// This should only be called after we're sure it's safe to ignore a pre-existing lockfile +// This will reset the internal lockfile handler when it's successful. 
+func (lock *Locker) recoverLockfile() error { + // File remove may or not work, depending on os-specific details with lockfiles + err := os.Remove(lock.fileLock.Path()) + if err != nil { + if runtime.GOOS == "windows" { + // retry on windows, the OS can take time to clean up + time.Sleep(time.Second) + err = os.Remove(lock.fileLock.Path()) + if err != nil { + return fmt.Errorf("tried twice to remove lockfile %s on windows: %w", + lock.fileLock.Path(), err) + } + } else { + return fmt.Errorf("lockfile %s cannot be removed: %w", lock.fileLock.Path(), err) + } + + } + lock.fileLock = flock.New(lock.filePath) + return nil +} + +// readExistingPidfile will read the contents of an existing pidfile +// Will return ErrLockfileEmpty if no data is found in the lockfile +func (lock *Locker) readExistingPidfile() (pidfile, error) { + rawPidfile, err := os.ReadFile(lock.filePath) + if err != nil { + return pidfile{}, fmt.Errorf("error reading pidfile from path %s", lock.filePath) + } + if len(rawPidfile) == 0 { + return pidfile{}, ErrLockfileEmpty + } + foundPidFile := pidfile{} + err = json.Unmarshal(rawPidfile, &foundPidFile) + if err != nil { + return pidfile{}, fmt.Errorf("error reading JSON from pid file %s: %w", lock.filePath, err) + } + return foundPidFile, nil +} diff --git a/libbeat/cmd/instance/locks/lock_test.go b/libbeat/cmd/instance/locks/lock_test.go new file mode 100644 index 00000000000..10c63f48567 --- /dev/null +++ b/libbeat/cmd/instance/locks/lock_test.go @@ -0,0 +1,155 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package locks + +import ( + "fmt" + "os" + "testing" + "time" + + "github.com/gofrs/uuid" + "github.com/stretchr/testify/require" + + "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/elastic-agent-libs/paths" +) + +func TestMain(m *testing.M) { + err := logp.DevelopmentSetup() + if err != nil { + fmt.Fprintf(os.Stderr, "error creating logger: %s\n", err) + os.Exit(1) + } + tmp, err := os.MkdirTemp("", "pidfile_test") + defer os.RemoveAll(tmp) + if err != nil { + fmt.Fprintf(os.Stderr, "error creating temp directory: %s\n", err) + os.Exit(1) + } + + origDataPath := paths.Paths.Data + defer func() { + paths.Paths.Data = origDataPath + }() + paths.Paths.Data = tmp + + exit := m.Run() + // cleanup tmpdir after run, but let the tests set the exit code + err = os.RemoveAll(tmp) + if err != nil { + fmt.Fprintf(os.Stderr, "Error removing tempdir %s, %s:", tmp, err) + } + + os.Exit(exit) +} + +func TestLockWithDeadPid(t *testing.T) { + // create old lockfile + pidFetch = fakeDeadPid + testBeat := beat.Info{Beat: mustNewUUID(t), StartTime: time.Now()} + locker := New(testBeat) + err := locker.Lock() + require.NoError(t, err) + + // create new locker + pidFetch = os.Getpid + newLocker := New(testBeat) + err = newLocker.Lock() + require.NoError(t, err) +} + +func TestLockWithTwoBeats(t *testing.T) { + testBeat := beat.Info{Beat: mustNewUUID(t), StartTime: time.Now()} + // emulate two beats trying to run from the same data path + locker := New(testBeat) + // use the parent process as another 
random beat + pidFetch = os.Getppid + err := locker.Lock() + require.NoError(t, err) + + // create new locker for this beat + pidFetch = os.Getpid + newLocker := New(testBeat) + err = newLocker.Lock() + require.Error(t, err) + t.Logf("Got desired error: %s", err) +} + +func TestDoubleLock(t *testing.T) { + testBeat := beat.Info{Beat: mustNewUUID(t), StartTime: time.Now()} + locker := New(testBeat) + err := locker.Lock() + require.NoError(t, err) + + newLocker := New(testBeat) + err = newLocker.Lock() + require.Error(t, err) + t.Logf("Got desired error: %s", err) +} + +func TestUnlock(t *testing.T) { + testBeat := beat.Info{Beat: mustNewUUID(t), StartTime: time.Now()} + locker := New(testBeat) + err := locker.Lock() + require.NoError(t, err) + + err = locker.Unlock() + require.NoError(t, err) +} + +func TestRestartWithSamePID(t *testing.T) { + // create old lockfile + testBeatName := mustNewUUID(t) + testBeat := beat.Info{Beat: testBeatName, StartTime: time.Now().Add(-time.Second * 20)} + locker := New(testBeat) + err := locker.Lock() + require.NoError(t, err) + // create new lockfile with the same PID but a newer time + // create old lockfile + testNewBeat := beat.Info{Name: testBeatName, StartTime: time.Now()} + lockerNew := New(testNewBeat) + err = lockerNew.Lock() + require.NoError(t, err) +} + +func TestEmptyLockfile(t *testing.T) { + testBeat := beat.Info{Beat: mustNewUUID(t), StartTime: time.Now().Add(-time.Second * 1)} + deadLock := New(testBeat) + // Create an empty lockfile + // Might happen in cases where a beat shut down at *just* the right time. 
+ fh, err := os.Create(deadLock.filePath) + require.NoError(t, err) + fh.Close() + + newBeat := New(testBeat) + err = newBeat.Lock() + require.NoError(t, err) + +} + +func mustNewUUID(t *testing.T) string { + uuid, err := uuid.NewV4() + require.NoError(t, err) + return uuid.String() +} + +func fakeDeadPid() int { + return 99999 +} diff --git a/libbeat/cmd/instance/locks/process_lookup_cgo.go b/libbeat/cmd/instance/locks/process_lookup_cgo.go new file mode 100644 index 00000000000..f0f2b253cca --- /dev/null +++ b/libbeat/cmd/instance/locks/process_lookup_cgo.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +//go:build (darwin && cgo) || freebsd || linux || windows || aix + +package locks + +import ( + "github.com/elastic/elastic-agent-system-metrics/metric/system/process" + "github.com/elastic/elastic-agent-system-metrics/metric/system/resolve" +) + +// findMatchingPID is a small wrapper to deal with cgo compat issues in libbeat's CI +func findMatchingPID(pid int) (process.PidState, error) { + return process.GetPIDState(resolve.NewTestResolver("/"), pid) +} diff --git a/libbeat/cmd/instance/locks/process_lookup_stub.go b/libbeat/cmd/instance/locks/process_lookup_stub.go new file mode 100644 index 00000000000..cd151ebad14 --- /dev/null +++ b/libbeat/cmd/instance/locks/process_lookup_stub.go @@ -0,0 +1,31 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +//go:build (!darwin || !cgo) && !freebsd && !linux && !windows && !aix + +package locks + +import ( + "fmt" + "runtime" + + "github.com/elastic/elastic-agent-system-metrics/metric/system/process" +) + +func findMatchingPID(pid int) (process.PidState, error) { + return process.Dead, fmt.Errorf("findMatchingPID not supported on platform: %s", runtime.GOOS) +} diff --git a/packetbeat/protos/dns/dns.go b/packetbeat/protos/dns/dns.go index f531af22ed2..028b9449c29 100644 --- a/packetbeat/protos/dns/dns.go +++ b/packetbeat/protos/dns/dns.go @@ -59,11 +59,9 @@ type dnsPlugin struct { results protos.Reporter // Channel where results are pushed. watcher procs.ProcessesWatcher -} - -var debugf = logp.MakeDebug("dns") -const maxDNSTupleRawSize = 16 + 16 + 2 + 2 + 4 + 1 + logger *logp.Logger +} // Transport protocol. type transport uint8 @@ -92,6 +90,15 @@ func (t transport) String() string { type hashableDNSTuple [maxDNSTupleRawSize]byte +const ( + maxDNSTupleRawSize = 2*(sizeofIP+sizeofPort) + sizeofID + sizeofTransport + + sizeofIP = 16 + sizeofPort = 2 + sizeofID = 2 + sizeofTransport = 1 +) + // DnsMessage contains a single DNS message. type dnsMessage struct { ts time.Time // Time when the message was received. 
@@ -109,8 +116,8 @@ type dnsTuple struct { transport transport id uint16 - raw hashableDNSTuple // Src_ip:Src_port:Dst_ip:Dst_port:Transport:Id - revRaw hashableDNSTuple // Dst_ip:Dst_port:Src_ip:Src_port:Transport:Id + raw hashableDNSTuple // Src_ip:Src_port:Dst_ip:Dst_port:ID:Transport + revRaw hashableDNSTuple // Dst_ip:Dst_port:Src_ip:Src_port:ID:Transport } func dnsTupleFromIPPort(t *common.IPPortTuple, trans transport, id uint16) dnsTuple { @@ -152,21 +159,21 @@ func (t *dnsTuple) computeHashables() { copy(t.raw[18:34], t.DstIP) copy(t.raw[34:36], []byte{byte(t.DstPort >> 8), byte(t.DstPort)}) copy(t.raw[36:38], []byte{byte(t.id >> 8), byte(t.id)}) - t.raw[39] = byte(t.transport) + t.raw[38] = byte(t.transport) copy(t.revRaw[0:16], t.DstIP) copy(t.revRaw[16:18], []byte{byte(t.DstPort >> 8), byte(t.DstPort)}) copy(t.revRaw[18:34], t.SrcIP) copy(t.revRaw[34:36], []byte{byte(t.SrcPort >> 8), byte(t.SrcPort)}) copy(t.revRaw[36:38], []byte{byte(t.id >> 8), byte(t.id)}) - t.revRaw[39] = byte(t.transport) + t.revRaw[38] = byte(t.transport) } func (t *dnsTuple) String() string { return fmt.Sprintf("DnsTuple src[%s:%d] dst[%s:%d] transport[%s] id[%d]", - t.SrcIP.String(), + t.SrcIP, t.SrcPort, - t.DstIP.String(), + t.DstIP, t.DstPort, t.transport, t.id) @@ -212,13 +219,8 @@ func init() { protos.Register("dns", New) } -func New( - testMode bool, - results protos.Reporter, - watcher procs.ProcessesWatcher, - cfg *conf.C, -) (protos.Plugin, error) { - p := &dnsPlugin{} +func New(testMode bool, results protos.Reporter, watcher procs.ProcessesWatcher, cfg *conf.C) (protos.Plugin, error) { + p := &dnsPlugin{logger: logp.NewLogger("dns")} config := defaultConfig if !testMode { if err := cfg.Unpack(&config); err != nil { @@ -240,7 +242,7 @@ func (dns *dnsPlugin) init(results protos.Reporter, watcher procs.ProcessesWatch func(k common.Key, v common.Value) { trans, ok := v.(*dnsTransaction) if !ok { - logp.Err("Expired value is not a *DnsTransaction.") + 
dns.logger.Error("Expired value is not a *DnsTransaction.") return } dns.expireTransaction(trans) @@ -253,14 +255,13 @@ func (dns *dnsPlugin) init(results protos.Reporter, watcher procs.ProcessesWatch return nil } -func (dns *dnsPlugin) setFromConfig(config *dnsConfig) error { +func (dns *dnsPlugin) setFromConfig(config *dnsConfig) { dns.ports = config.Ports dns.sendRequest = config.SendRequest dns.sendResponse = config.SendResponse dns.includeAuthorities = config.IncludeAuthorities dns.includeAdditionals = config.IncludeAdditionals dns.transactionTimeout = config.TransactionTimeout - return nil } func newTransaction(ts time.Time, tuple dnsTuple, cmd common.ProcessTuple) *dnsTransaction { @@ -292,14 +293,14 @@ func (dns *dnsPlugin) ConnectionTimeout() time.Duration { } func (dns *dnsPlugin) receivedDNSRequest(tuple *dnsTuple, msg *dnsMessage) { - debugf("Processing query. %s", tuple.String()) + dns.logger.Debugf("Processing query. %s", tuple) trans := dns.deleteTransaction(tuple.hashable()) if trans != nil { // This happens if a client puts multiple requests in flight // with the same ID. trans.notes = append(trans.notes, duplicateQueryMsg.Error()) - debugf("%s %s", duplicateQueryMsg.Error(), tuple.String()) + dns.logger.Debugf("%v %s", duplicateQueryMsg, tuple) dns.publishTransaction(trans) dns.deleteTransaction(trans.tuple.hashable()) } @@ -308,7 +309,7 @@ func (dns *dnsPlugin) receivedDNSRequest(tuple *dnsTuple, msg *dnsMessage) { if tuple.transport == transportUDP && (msg.data.IsEdns0() != nil) && msg.length > maxDNSPacketSize { trans.notes = append(trans.notes, udpPacketTooLarge.Error()) - debugf("%s", udpPacketTooLarge.Error()) + dns.logger.Debugf("%v", udpPacketTooLarge) } dns.transactions.Put(tuple.hashable(), trans) @@ -316,13 +317,13 @@ func (dns *dnsPlugin) receivedDNSRequest(tuple *dnsTuple, msg *dnsMessage) { } func (dns *dnsPlugin) receivedDNSResponse(tuple *dnsTuple, msg *dnsMessage) { - debugf("Processing response. 
%s", tuple.String()) + dns.logger.Debugf("Processing response. %s", tuple) trans := dns.getTransaction(tuple.revHashable()) if trans == nil { trans = newTransaction(msg.ts, tuple.reverse(), msg.cmdlineTuple.Reverse()) trans.notes = append(trans.notes, orphanedResponse.Error()) - debugf("%s %s", orphanedResponse.Error(), tuple.String()) + dns.logger.Debugf("%v %s", orphanedResponse, tuple) unmatchedResponses.Add(1) } @@ -332,7 +333,7 @@ func (dns *dnsPlugin) receivedDNSResponse(tuple *dnsTuple, msg *dnsMessage) { respIsEdns := msg.data.IsEdns0() != nil if !respIsEdns && msg.length > maxDNSPacketSize { trans.notes = append(trans.notes, udpPacketTooLarge.responseError()) - debugf("%s", udpPacketTooLarge.responseError()) + dns.logger.Debugf("%s", udpPacketTooLarge.responseError()) } request := trans.request @@ -342,10 +343,10 @@ func (dns *dnsPlugin) receivedDNSResponse(tuple *dnsTuple, msg *dnsMessage) { switch { case reqIsEdns && !respIsEdns: trans.notes = append(trans.notes, respEdnsNoSupport.Error()) - debugf("%s %s", respEdnsNoSupport.Error(), tuple.String()) + dns.logger.Debugf("%v %s", respEdnsNoSupport, tuple) case !reqIsEdns && respIsEdns: trans.notes = append(trans.notes, respEdnsUnexpected.Error()) - debugf("%s %s", respEdnsUnexpected.Error(), tuple.String()) + dns.logger.Debugf("%v %s", respEdnsUnexpected, tuple) } } } @@ -359,7 +360,7 @@ func (dns *dnsPlugin) publishTransaction(t *dnsTransaction) { return } - debugf("Publishing transaction. %s", t.tuple.String()) + dns.logger.Debugf("Publishing transaction. 
%s", &t.tuple) evt, pbf := pb.NewBeatEvent(t.ts) @@ -388,18 +389,17 @@ func (dns *dnsPlugin) publishTransaction(t *dnsTransaction) { fields["query"] = dnsQuestionToString(t.request.data.Question[0]) fields["resource"] = t.request.data.Question[0].Name } - addDNSToMapStr(dnsEvent, pbf, t.response.data, dns.includeAuthorities, - dns.includeAdditionals) + addDNSToMapStr(dnsEvent, pbf, t.response.data, dns.includeAuthorities, dns.includeAdditionals, dns.logger) if t.response.data.Rcode == 0 { fields["status"] = common.OK_STATUS } if dns.sendRequest { - fields["request"] = dnsToString(t.request.data) + fields["request"] = dnsToString(t.request.data, dns.logger) } if dns.sendResponse { - fields["response"] = dnsToString(t.response.data) + fields["response"] = dnsToString(t.response.data, dns.logger) } } else if t.request != nil { pbf.Source.Bytes = int64(t.request.length) @@ -411,11 +411,10 @@ func (dns *dnsPlugin) publishTransaction(t *dnsTransaction) { fields["query"] = dnsQuestionToString(t.request.data.Question[0]) fields["resource"] = t.request.data.Question[0].Name } - addDNSToMapStr(dnsEvent, pbf, t.request.data, dns.includeAuthorities, - dns.includeAdditionals) + addDNSToMapStr(dnsEvent, pbf, t.request.data, dns.includeAuthorities, dns.includeAdditionals, dns.logger) if dns.sendRequest { - fields["request"] = dnsToString(t.request.data) + fields["request"] = dnsToString(t.request.data, dns.logger) } } else if t.response != nil { pbf.Destination.Bytes = int64(t.response.length) @@ -427,10 +426,9 @@ func (dns *dnsPlugin) publishTransaction(t *dnsTransaction) { fields["query"] = dnsQuestionToString(t.response.data.Question[0]) fields["resource"] = t.response.data.Question[0].Name } - addDNSToMapStr(dnsEvent, pbf, t.response.data, dns.includeAuthorities, - dns.includeAdditionals) + addDNSToMapStr(dnsEvent, pbf, t.response.data, dns.includeAuthorities, dns.includeAdditionals, dns.logger) if dns.sendResponse { - fields["response"] = dnsToString(t.response.data) + 
fields["response"] = dnsToString(t.response.data, dns.logger) } } @@ -439,13 +437,13 @@ func (dns *dnsPlugin) publishTransaction(t *dnsTransaction) { func (dns *dnsPlugin) expireTransaction(t *dnsTransaction) { t.notes = append(t.notes, noResponse.Error()) - debugf("%s %s", noResponse.Error(), t.tuple.String()) + dns.logger.Debugf("%v %s", noResponse, &t.tuple) dns.publishTransaction(t) unmatchedRequests.Add(1) } // Adds the DNS message data to the supplied MapStr. -func addDNSToMapStr(m mapstr.M, pbf *pb.Fields, dns *mkdns.Msg, authority bool, additional bool) { +func addDNSToMapStr(m mapstr.M, pbf *pb.Fields, dns *mkdns.Msg, authority bool, additional bool, logger *logp.Logger) { m["id"] = dns.Id m["op_code"] = dnsOpCodeToString(dns.Opcode) @@ -527,7 +525,7 @@ func addDNSToMapStr(m mapstr.M, pbf *pb.Fields, dns *mkdns.Msg, authority bool, m["answers_count"] = len(dns.Answer) if len(dns.Answer) > 0 { var resolvedIPs []string - m["answers"], resolvedIPs = rrsToMapStrs(dns.Answer, true) + m["answers"], resolvedIPs = rrsToMapStrs(dns.Answer, true, logger) if len(resolvedIPs) > 0 { m["resolved_ip"] = resolvedIPs pbf.AddIP(resolvedIPs...) 
@@ -536,7 +534,7 @@ func addDNSToMapStr(m mapstr.M, pbf *pb.Fields, dns *mkdns.Msg, authority bool, m["authorities_count"] = len(dns.Ns) if authority && len(dns.Ns) > 0 { - m["authorities"], _ = rrsToMapStrs(dns.Ns, false) + m["authorities"], _ = rrsToMapStrs(dns.Ns, false, logger) } if rrOPT != nil { @@ -545,7 +543,7 @@ func addDNSToMapStr(m mapstr.M, pbf *pb.Fields, dns *mkdns.Msg, authority bool, m["additionals_count"] = len(dns.Extra) } if additional && len(dns.Extra) > 0 { - rrsMapStrs, _ := rrsToMapStrs(dns.Extra, false) + rrsMapStrs, _ := rrsToMapStrs(dns.Extra, false, logger) // We do not want OPT RR to appear in the 'additional' section, // that's why rrsMapStrs could be empty even though len(dns.Extra) > 0 if len(rrsMapStrs) > 0 { @@ -590,13 +588,13 @@ func optToMapStr(rrOPT *mkdns.OPT) mapstr.M { // rrsToMapStr converts an slice of RR's to an slice of MapStr's and optionally // returns a list of the IP addresses found in the resource records. -func rrsToMapStrs(records []mkdns.RR, ipList bool) ([]mapstr.M, []string) { +func rrsToMapStrs(records []mkdns.RR, ipList bool, logger *logp.Logger) ([]mapstr.M, []string) { var allIPs []string mapStrSlice := make([]mapstr.M, 0, len(records)) for _, rr := range records { rrHeader := rr.Header() - mapStr, ips := rrToMapStr(rr, ipList) + mapStr, ips := rrToMapStr(rr, ipList, logger) if len(mapStr) == 0 { // OPT pseudo-RR returns an empty MapStr continue } @@ -619,11 +617,11 @@ func rrsToMapStrs(records []mkdns.RR, ipList bool) ([]mapstr.M, []string) { // // TODO An improvement would be to replace 'data' by the real field name // It would require some changes in unit tests -func rrToString(rr mkdns.RR) string { +func rrToString(rr mkdns.RR, logger *logp.Logger) string { var st string var keys []string - mapStr, _ := rrToMapStr(rr, false) + mapStr, _ := rrToMapStr(rr, false, logger) data, ok := mapStr["data"] delete(mapStr, "data") @@ -656,7 +654,7 @@ func rrToString(rr mkdns.RR) string { return b.String() } -func 
rrToMapStr(rr mkdns.RR, ipList bool) (mapstr.M, []string) { +func rrToMapStr(rr mkdns.RR, ipList bool, logger *logp.Logger) (mapstr.M, []string) { mapStr := mapstr.M{} rrType := rr.Header().Rrtype @@ -671,17 +669,17 @@ func rrToMapStr(rr mkdns.RR, ipList bool) (mapstr.M, []string) { switch x := rr.(type) { default: // We don't have special handling for this type - debugf("No special handling for RR type %s", dnsTypeToString(rrType)) + logger.Debugf("No special handling for RR type %s", dnsTypeToString(rrType)) unsupportedRR := new(mkdns.RFC3597) err := unsupportedRR.ToRFC3597(x) if err == nil { rData, err := hexStringToString(unsupportedRR.Rdata) mapStr["data"] = rData if err != nil { - debugf("%s", err.Error()) + logger.Debugf("%v", err) } } else { - debugf("Rdata for the unhandled RR type %s could not be fetched", dnsTypeToString(rrType)) + logger.Debugf("Rdata for the unhandled RR type %s could not be fetched", dnsTypeToString(rrType)) } // Don't attempt to render IPs for answers that are incomplete. @@ -735,11 +733,11 @@ func rrToMapStr(rr mkdns.RR, ipList bool) (mapstr.M, []string) { mapStr["data"] = trimRightDot(x.Ptr) case *mkdns.RFC3597: // Miekg/dns lib doesn't handle this type - debugf("Unknown RR type %s", dnsTypeToString(rrType)) + logger.Debugf("Unknown RR type %s", dnsTypeToString(rrType)) rData, err := hexStringToString(x.Rdata) mapStr["data"] = rData if err != nil { - debugf("%s", err.Error()) + logger.Debugf("%v", err) } case *mkdns.RRSIG: mapStr["type_covered"] = dnsTypeToString(x.TypeCovered) @@ -781,16 +779,16 @@ func dnsQuestionToString(q mkdns.Question) string { // rrsToString converts an array of RR's to a // string. -func rrsToString(r []mkdns.RR) string { +func rrsToString(r []mkdns.RR, logger *logp.Logger) string { var rrStrs []string for _, rr := range r { - rrStrs = append(rrStrs, rrToString(rr)) + rrStrs = append(rrStrs, rrToString(rr, logger)) } return strings.Join(rrStrs, "; ") } // dnsToString converts a DNS message to a string. 
-func dnsToString(dns *mkdns.Msg) string { +func dnsToString(dns *mkdns.Msg, logger *logp.Logger) string { var msgType string if dns.Response { msgType = "response" @@ -834,17 +832,17 @@ func dnsToString(dns *mkdns.Msg) string { if len(dns.Answer) > 0 { a = append(a, fmt.Sprintf("ANSWER %s", - rrsToString(dns.Answer))) + rrsToString(dns.Answer, logger))) } if len(dns.Ns) > 0 { a = append(a, fmt.Sprintf("AUTHORITY %s", - rrsToString(dns.Ns))) + rrsToString(dns.Ns, logger))) } if len(dns.Extra) > 0 { a = append(a, fmt.Sprintf("ADDITIONAL %s", - rrsToString(dns.Extra))) + rrsToString(dns.Extra, logger))) } return strings.Join(a, "; ") diff --git a/packetbeat/protos/dns/dns_tcp.go b/packetbeat/protos/dns/dns_tcp.go index 20832ff2479..ac1eacaf88e 100644 --- a/packetbeat/protos/dns/dns_tcp.go +++ b/packetbeat/protos/dns/dns_tcp.go @@ -54,33 +54,31 @@ type dnsConnectionData struct { } func (dns *dnsPlugin) Parse(pkt *protos.Packet, tcpTuple *common.TCPTuple, dir uint8, private protos.ProtocolData) protos.ProtocolData { - defer logp.Recover("Dns ParseTcp") + defer dns.logger.Recover("Dns ParseTcp") - debugf("Parsing packet addressed with %s of length %d.", - pkt.Tuple.String(), len(pkt.Payload)) + dns.logger.Debugf("dns", "Parsing packet addressed with %s of length %d.", &pkt.Tuple, len(pkt.Payload)) - conn := ensureDNSConnection(private) + conn := ensureDNSConnection(private, dns.logger) conn = dns.doParse(conn, pkt, tcpTuple, dir) if conn == nil { return nil } - return conn } -func ensureDNSConnection(private protos.ProtocolData) *dnsConnectionData { +func ensureDNSConnection(private protos.ProtocolData, logger *logp.Logger) *dnsConnectionData { if private == nil { return &dnsConnectionData{} } conn, ok := private.(*dnsConnectionData) if !ok { - logp.Warn("Dns connection data type error, create new one") + logger.Warn("Dns connection data type error, create new one") return &dnsConnectionData{} } if conn == nil { - logp.Warn("Unexpected: dns connection data not set, 
create new one") + logger.Warn("Unexpected: dns connection data not set, create new one") return &dnsConnectionData{} } @@ -101,16 +99,15 @@ func (dns *dnsPlugin) doParse(conn *dnsConnectionData, pkt *protos.Packet, tcpTu stream.rawData = append(stream.rawData, payload...) if len(stream.rawData) > tcp.TCPMaxDataInStream { - debugf("Stream data too large, dropping DNS stream") + dns.logger.Debugf("dns", "Stream data too large, dropping DNS stream") conn.data[dir] = nil return conn } } decodedData, err := stream.handleTCPRawData() if err != nil { - - if err == incompleteMsg { - debugf("Waiting for more raw data") + if err == incompleteMsg { //nolint:errorlint // incompleteMsg is not wrapped. + dns.logger.Debugf("dns", "Waiting for more raw data") return conn } @@ -118,8 +115,7 @@ func (dns *dnsPlugin) doParse(conn *dnsConnectionData, pkt *protos.Packet, tcpTu dns.publishResponseError(conn, err) } - debugf("%s addresses %s, length %d", err.Error(), - tcpTuple.String(), len(stream.rawData)) + dns.logger.Debugf("dns", "%v addresses %s, length %d", err, tcpTuple, len(stream.rawData)) // This means that malformed requests or responses are being sent... 
// TODO: publish the situation also if Request @@ -176,7 +172,6 @@ func (dns *dnsPlugin) ReceivedFin(tcpTuple *common.TCPTuple, dir uint8, private return private } stream := conn.data[dir] - if stream == nil || stream.message == nil { return conn } @@ -192,8 +187,7 @@ func (dns *dnsPlugin) ReceivedFin(tcpTuple *common.TCPTuple, dir uint8, private dns.publishResponseError(conn, err) } - debugf("%s addresses %s, length %d", err.Error(), - tcpTuple.String(), len(stream.rawData)) + dns.logger.Debugf("dns", "%v addresses %s, length %d", err, tcpTuple, len(stream.rawData)) return conn } @@ -213,7 +207,6 @@ func (dns *dnsPlugin) GapInStream(tcpTuple *common.TCPTuple, dir uint8, nbytes i } decodedData, err := stream.handleTCPRawData() - if err == nil { dns.messageComplete(conn, tcpTuple, dir, decodedData) return private, true @@ -223,9 +216,8 @@ func (dns *dnsPlugin) GapInStream(tcpTuple *common.TCPTuple, dir uint8, nbytes i dns.publishResponseError(conn, err) } - debugf("%s addresses %s, length %d", err.Error(), - tcpTuple.String(), len(stream.rawData)) - debugf("Dropping the stream %s", tcpTuple.String()) + dns.logger.Debugf("dns", "%v addresses %s, length %d", err, tcpTuple, len(stream.rawData)) + dns.logger.Debugf("dns", "Dropping the stream %s", tcpTuple) // drop the stream because it is binary Data and it would be unexpected to have a decodable message later return private, true @@ -243,26 +235,23 @@ func (dns *dnsPlugin) publishResponseError(conn *dnsConnectionData, err error) { dataOrigin := conn.prevRequest.data dnsTupleOrigin := dnsTupleFromIPPort(&conn.prevRequest.tuple, transportTCP, dataOrigin.Id) - hashDNSTupleOrigin := (&dnsTupleOrigin).hashable() + hashDNSTupleOrigin := dnsTupleOrigin.hashable() trans := dns.deleteTransaction(hashDNSTupleOrigin) - if trans == nil { // happens if Parse, Gap or Fin already published the response error return } - errDNS, ok := err.(*dnsError) - if !ok { - return - } - trans.notes = append(trans.notes, errDNS.responseError()) + 
if err, ok := err.(*dnsError); ok { //nolint:errorlint // err always comes from handleTCPRawData and is either *dnsError or nil. + trans.notes = append(trans.notes, err.responseError()) - // Should we publish the length (bytes_out) of the failed Response? - // streamReverse.message.Length = len(streamReverse.rawData) - // trans.Response = streamReverse.message + // Should we publish the length (bytes_out) of the failed Response? + // streamReverse.message.Length = len(streamReverse.rawData) + // trans.Response = streamReverse.message - dns.publishTransaction(trans) - dns.deleteTransaction(hashDNSTupleOrigin) + dns.publishTransaction(trans) + dns.deleteTransaction(hashDNSTupleOrigin) + } } // Manages data length prior to decoding the data and manages errors after decoding @@ -298,6 +287,5 @@ func (stream *dnsStream) handleTCPRawData() (*mkdns.Msg, error) { if err != nil { return nil, err } - return decodedData, nil } diff --git a/packetbeat/protos/dns/dns_test.go b/packetbeat/protos/dns/dns_test.go index e4cf5b207c5..c97ee259b65 100644 --- a/packetbeat/protos/dns/dns_test.go +++ b/packetbeat/protos/dns/dns_test.go @@ -85,7 +85,7 @@ type eventStore struct { } func (e *eventStore) publish(event beat.Event) { - publish.MarshalPacketbeatFields(&event, nil, nil) + _, _ = publish.MarshalPacketbeatFields(&event, nil, nil) e.events = append(e.events, event) } @@ -98,7 +98,7 @@ func newDNS(store *eventStore, verbose bool) *dnsPlugin { if verbose { level = logp.DebugLevel } - logp.DevelopmentSetup( + _ = logp.DevelopmentSetup( logp.WithLevel(level), logp.WithSelectors("dns"), ) @@ -327,7 +327,7 @@ func TestRRsToMapStrsWithOPTRecord(t *testing.T) { // The OPT record is a pseudo-record so it doesn't become a real record // in our conversion, and there will be 1 entry instead of 2. 
- mapStrs, _ := rrsToMapStrs([]mkdns.RR{o, r}, false) + mapStrs, _ := rrsToMapStrs([]mkdns.RR{o, r}, false, logp.NewLogger("dns_test")) assert.Len(t, mapStrs, 1) mapStr := mapStrs[0] diff --git a/packetbeat/protos/dns/dns_udp.go b/packetbeat/protos/dns/dns_udp.go index 5051345494f..89d76dea84c 100644 --- a/packetbeat/protos/dns/dns_udp.go +++ b/packetbeat/protos/dns/dns_udp.go @@ -17,28 +17,23 @@ package dns -import ( - "github.com/elastic/elastic-agent-libs/logp" - - "github.com/elastic/beats/v7/packetbeat/protos" -) +import "github.com/elastic/beats/v7/packetbeat/protos" // Only EDNS packets should have their size beyond this value const maxDNSPacketSize = (1 << 9) // 512 (bytes) func (dns *dnsPlugin) ParseUDP(pkt *protos.Packet) { - defer logp.Recover("Dns ParseUdp") + defer dns.logger.Recover("Dns ParseUdp") packetSize := len(pkt.Payload) - debugf("Parsing packet addressed with %s of length %d.", - pkt.Tuple.String(), packetSize) + dns.logger.Debugf("Parsing packet addressed with %s of length %d.", &pkt.Tuple, packetSize) dnsPkt, err := decodeDNSData(transportUDP, pkt.Payload) if err != nil { // This means that malformed requests or responses are being sent or // that someone is attempting to the DNS port for non-DNS traffic. Both // are issues that a monitoring system should report. 
- debugf("%s", err.Error()) + dns.logger.Debugf("%v", err) return } diff --git a/packetbeat/protos/dns/names.go b/packetbeat/protos/dns/names.go index afd1080e73e..9961ca6b147 100644 --- a/packetbeat/protos/dns/names.go +++ b/packetbeat/protos/dns/names.go @@ -35,7 +35,7 @@ import ( func dnsOpCodeToString(opCode int) string { s, exists := mkdns.OpcodeToString[opCode] if !exists { - return strconv.Itoa(int(opCode)) + return strconv.Itoa(opCode) } return s } @@ -46,7 +46,7 @@ func dnsOpCodeToString(opCode int) string { func dnsResponseCodeToString(rcode int) string { s, exists := mkdns.RcodeToString[rcode] if !exists { - return fmt.Sprintf("Unknown %d", int(rcode)) + return fmt.Sprintf("Unknown %d", rcode) } return s } @@ -55,7 +55,7 @@ func dnsResponseCodeToString(rcode int) string { // string representation is unknown then the numeric value will be returned // as a string. func dnsTypeToString(t uint16) string { - s, exists := mkdns.TypeToString[uint16(t)] + s, exists := mkdns.TypeToString[t] if !exists { return strconv.Itoa(int(t)) } @@ -66,7 +66,7 @@ func dnsTypeToString(t uint16) string { // string representation is unknown then the numeric value will be returned // as a string. func dnsClassToString(c uint16) string { - s, exists := mkdns.ClassToString[uint16(c)] + s, exists := mkdns.ClassToString[c] if !exists { return strconv.Itoa(int(c)) } @@ -77,7 +77,7 @@ func dnsClassToString(c uint16) string { // string representation is unknown then the numeric value will be returned // as a string. func dnsAlgorithmToString(a uint8) string { - s, exists := mkdns.AlgorithmToString[uint8(a)] + s, exists := mkdns.AlgorithmToString[a] if !exists { return strconv.Itoa(int(a)) } @@ -88,7 +88,7 @@ func dnsAlgorithmToString(a uint8) string { // string representation is unknown then the numeric value will be returned // as a string. 
func dnsHashToString(h uint8) string { - s, exists := mkdns.HashToString[uint8(h)] + s, exists := mkdns.HashToString[h] if !exists { return strconv.Itoa(int(h)) } diff --git a/packetbeat/protos/dns/names_test.go b/packetbeat/protos/dns/names_test.go index 3e83066e72d..675c50bc06f 100644 --- a/packetbeat/protos/dns/names_test.go +++ b/packetbeat/protos/dns/names_test.go @@ -33,6 +33,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/elastic/beats/v7/packetbeat/pb" + "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" ) @@ -112,7 +113,7 @@ func assertDNSMessage(t testing.TB, q dnsTestMsg) { } mapStr := mapstr.M{} - addDNSToMapStr(mapStr, pb.NewFields(), dns, true, true) + addDNSToMapStr(mapStr, pb.NewFields(), dns, true, true, logp.NewLogger("dns_test")) if q.question != nil { for k, v := range q.question { assert.NotNil(t, mapStr["question"].(mapstr.M)[k]) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 2635c0a9eaa..57b59c8d4e4 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-c49fac70-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-40086bc7-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -31,7 +31,7 @@ services: - "./docker/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles" logstash: - image: docker.elastic.co/logstash/logstash:8.6.0-c49fac70-SNAPSHOT + image: docker.elastic.co/logstash/logstash:8.6.0-40086bc7-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -44,7 +44,7 @@ services: - 5055:5055 kibana: - image: docker.elastic.co/kibana/kibana:8.6.0-c49fac70-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.6.0-40086bc7-SNAPSHOT environment: - 
"ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" diff --git a/x-pack/functionbeat/docs/page_header.html b/x-pack/functionbeat/docs/page_header.html new file mode 100644 index 00000000000..cec30d66bbf --- /dev/null +++ b/x-pack/functionbeat/docs/page_header.html @@ -0,0 +1,3 @@ +Functionbeat will reach End of Support on October 18, 2023. You should consider +moving your deployments to the more versatile and efficient Elastic Serverless +Forwarder. diff --git a/x-pack/heartbeat/monitors/browser/browser.go b/x-pack/heartbeat/monitors/browser/browser.go index a84422b4b82..6411d0d3f09 100644 --- a/x-pack/heartbeat/monitors/browser/browser.go +++ b/x-pack/heartbeat/monitors/browser/browser.go @@ -15,6 +15,7 @@ import ( "github.com/elastic/elastic-agent-libs/config" "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/beats/v7/heartbeat/ecserr" "github.com/elastic/beats/v7/heartbeat/monitors/plugin" ) @@ -24,14 +25,12 @@ func init() { var showExperimentalOnce = sync.Once{} -var ErrNotSyntheticsCapableError = fmt.Errorf("synthetic monitors cannot be created outside the official elastic docker image") - func create(name string, cfg *config.C) (p plugin.Plugin, err error) { // We don't want users running synthetics in environments that don't have the required GUI libraries etc, so we check // this flag. When we're ready to support the many possible configurations of systems outside the docker environment // we can remove this check. if os.Getenv("ELASTIC_SYNTHETICS_CAPABLE") != "true" { - return plugin.Plugin{}, ErrNotSyntheticsCapableError + return plugin.Plugin{}, ecserr.NewNotSyntheticsCapableError() } showExperimentalOnce.Do(func() {